1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40
41 #include <net/checksum.h>
42
43 #include <asm/system.h>
44 #include <asm/io.h>
45 #include <asm/byteorder.h>
46 #include <asm/uaccess.h>
47
48 #ifdef CONFIG_SPARC64
49 #include <asm/idprom.h>
50 #include <asm/oplib.h>
51 #include <asm/pbm.h>
52 #endif
53
54 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
55 #define TG3_VLAN_TAG_USED 1
56 #else
57 #define TG3_VLAN_TAG_USED 0
58 #endif
59
60 #ifdef NETIF_F_TSO
61 #define TG3_TSO_SUPPORT 1
62 #else
63 #define TG3_TSO_SUPPORT 0
64 #endif
65
66 #include "tg3.h"
67
68 #define DRV_MODULE_NAME         "tg3"
69 #define PFX DRV_MODULE_NAME     ": "
70 #define DRV_MODULE_VERSION      "3.42"
71 #define DRV_MODULE_RELDATE      "Oct 3, 2005"
72
73 #define TG3_DEF_MAC_MODE        0
74 #define TG3_DEF_RX_MODE         0
75 #define TG3_DEF_TX_MODE         0
76 #define TG3_DEF_MSG_ENABLE        \
77         (NETIF_MSG_DRV          | \
78          NETIF_MSG_PROBE        | \
79          NETIF_MSG_LINK         | \
80          NETIF_MSG_TIMER        | \
81          NETIF_MSG_IFDOWN       | \
82          NETIF_MSG_IFUP         | \
83          NETIF_MSG_RX_ERR       | \
84          NETIF_MSG_TX_ERR)
85
86 /* length of time before we decide the hardware is borked,
87  * and dev->tx_timeout() should be called to fix the problem
88  */
89 #define TG3_TX_TIMEOUT                  (5 * HZ)
90
91 /* hardware minimum and maximum for a single frame's data payload */
92 #define TG3_MIN_MTU                     60
93 #define TG3_MAX_MTU(tp) \
94         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
95
96 /* These numbers seem to be hard coded in the NIC firmware somehow.
97  * You can't change the ring sizes, but you can change where you place
98  * them in the NIC onboard memory.
99  */
100 #define TG3_RX_RING_SIZE                512
101 #define TG3_DEF_RX_RING_PENDING         200
102 #define TG3_RX_JUMBO_RING_SIZE          256
103 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
104
105 /* Do not place this n-ring entries value into the tp struct itself,
106  * we really want to expose these constants to GCC so that modulo et
107  * al.  operations are done with shifts and masks instead of with
108  * hw multiply/modulo instructions.  Another solution would be to
109  * replace things like '% foo' with '& (foo - 1)'.
110  */
111 #define TG3_RX_RCB_RING_SIZE(tp)        \
112         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
113
114 #define TG3_TX_RING_SIZE                512
115 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
116
117 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
118                                  TG3_RX_RING_SIZE)
119 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_JUMBO_RING_SIZE)
121 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
122                                    TG3_RX_RCB_RING_SIZE(tp))
123 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
124                                  TG3_TX_RING_SIZE)
125 #define TX_BUFFS_AVAIL(TP)                                              \
126         ((TP)->tx_pending -                                             \
127          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
128 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
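/* TG3_TX_RING_SIZE is a power of two, so the '& (TG3_TX_RING_SIZE - 1)'
 * masks above are the cheap form of '% TG3_TX_RING_SIZE' described in the
 * comment above, e.g. NEXT_TX(TG3_TX_RING_SIZE - 1) wraps back to 0.
 */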
129
130 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
131 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
132
133 /* minimum number of free TX descriptors required to wake up TX process */
134 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
135
136 /* number of ETHTOOL_GSTATS u64's */
137 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
138
139 #define TG3_NUM_TEST            6
140
141 static char version[] __devinitdata =
142         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
143
144 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
145 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
146 MODULE_LICENSE("GPL");
147 MODULE_VERSION(DRV_MODULE_VERSION);
148
149 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
150 module_param(tg3_debug, int, 0);
151 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
152
153 static struct pci_device_id tg3_pci_tbl[] = {
154         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
155           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { 0, }
247 };
248
249 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
250
251 static struct {
252         const char string[ETH_GSTRING_LEN];
253 } ethtool_stats_keys[TG3_NUM_STATS] = {
254         { "rx_octets" },
255         { "rx_fragments" },
256         { "rx_ucast_packets" },
257         { "rx_mcast_packets" },
258         { "rx_bcast_packets" },
259         { "rx_fcs_errors" },
260         { "rx_align_errors" },
261         { "rx_xon_pause_rcvd" },
262         { "rx_xoff_pause_rcvd" },
263         { "rx_mac_ctrl_rcvd" },
264         { "rx_xoff_entered" },
265         { "rx_frame_too_long_errors" },
266         { "rx_jabbers" },
267         { "rx_undersize_packets" },
268         { "rx_in_length_errors" },
269         { "rx_out_length_errors" },
270         { "rx_64_or_less_octet_packets" },
271         { "rx_65_to_127_octet_packets" },
272         { "rx_128_to_255_octet_packets" },
273         { "rx_256_to_511_octet_packets" },
274         { "rx_512_to_1023_octet_packets" },
275         { "rx_1024_to_1522_octet_packets" },
276         { "rx_1523_to_2047_octet_packets" },
277         { "rx_2048_to_4095_octet_packets" },
278         { "rx_4096_to_8191_octet_packets" },
279         { "rx_8192_to_9022_octet_packets" },
280
281         { "tx_octets" },
282         { "tx_collisions" },
283
284         { "tx_xon_sent" },
285         { "tx_xoff_sent" },
286         { "tx_flow_control" },
287         { "tx_mac_errors" },
288         { "tx_single_collisions" },
289         { "tx_mult_collisions" },
290         { "tx_deferred" },
291         { "tx_excessive_collisions" },
292         { "tx_late_collisions" },
293         { "tx_collide_2times" },
294         { "tx_collide_3times" },
295         { "tx_collide_4times" },
296         { "tx_collide_5times" },
297         { "tx_collide_6times" },
298         { "tx_collide_7times" },
299         { "tx_collide_8times" },
300         { "tx_collide_9times" },
301         { "tx_collide_10times" },
302         { "tx_collide_11times" },
303         { "tx_collide_12times" },
304         { "tx_collide_13times" },
305         { "tx_collide_14times" },
306         { "tx_collide_15times" },
307         { "tx_ucast_packets" },
308         { "tx_mcast_packets" },
309         { "tx_bcast_packets" },
310         { "tx_carrier_sense_errors" },
311         { "tx_discards" },
312         { "tx_errors" },
313
314         { "dma_writeq_full" },
315         { "dma_write_prioq_full" },
316         { "rxbds_empty" },
317         { "rx_discards" },
318         { "rx_errors" },
319         { "rx_threshold_hit" },
320
321         { "dma_readq_full" },
322         { "dma_read_prioq_full" },
323         { "tx_comp_queue_full" },
324
325         { "ring_set_send_prod_index" },
326         { "ring_status_update" },
327         { "nic_irqs" },
328         { "nic_avoided_irqs" },
329         { "nic_tx_threshold_hit" }
330 };
331
332 static struct {
333         const char string[ETH_GSTRING_LEN];
334 } ethtool_test_keys[TG3_NUM_TEST] = {
335         { "nvram test     (online) " },
336         { "link test      (online) " },
337         { "register test  (offline)" },
338         { "memory test    (offline)" },
339         { "loopback test  (offline)" },
340         { "interrupt test (offline)" },
341 };
342
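/* Indirect register access: instead of going through MMIO, the target
 * register offset is written to the TG3PCI_REG_BASE_ADDR window in PCI
 * config space and the value is then moved through TG3PCI_REG_DATA.
 * indirect_lock keeps the window/data pairs from interleaving.
 */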
343 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
344 {
345         unsigned long flags;
346
347         spin_lock_irqsave(&tp->indirect_lock, flags);
348         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
349         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
350         spin_unlock_irqrestore(&tp->indirect_lock, flags);
351 }
352
353 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
354 {
355         writel(val, tp->regs + off);
356         readl(tp->regs + off);
357 }
358
359 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
360 {
361         unsigned long flags;
362         u32 val;
363
364         spin_lock_irqsave(&tp->indirect_lock, flags);
365         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
366         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
367         spin_unlock_irqrestore(&tp->indirect_lock, flags);
368         return val;
369 }
370
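/* In indirect mode the mailboxes are reached through the same config-space
 * window, offset by 0x5600.  The receive-return and standard ring producer
 * mailboxes are special-cased because they have dedicated config-space
 * registers of their own.
 */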
371 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
372 {
373         unsigned long flags;
374
375         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
376                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
377                                        TG3_64BIT_REG_LOW, val);
378                 return;
379         }
380         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
381                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
382                                        TG3_64BIT_REG_LOW, val);
383                 return;
384         }
385
386         spin_lock_irqsave(&tp->indirect_lock, flags);
387         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
388         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390
391         /* In indirect mode when disabling interrupts, we also need
392          * to clear the interrupt bit in the GRC local ctrl register.
393          */
394         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
395             (val == 0x1)) {
396                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
397                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
398         }
399 }
400
401 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
402 {
403         unsigned long flags;
404         u32 val;
405
406         spin_lock_irqsave(&tp->indirect_lock, flags);
407         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
408         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
409         spin_unlock_irqrestore(&tp->indirect_lock, flags);
410         return val;
411 }
412
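/* Write a register and, unless one of the chip bug flags forbids the
 * read-back, read it again so the posted PCI write is flushed before the
 * caller proceeds.
 */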
413 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
414 {
415         tp->write32(tp, off, val);
416         if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
417             !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
418             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
419                 tp->read32(tp, off);    /* flush */
420 }
421
422 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
423 {
424         tp->write32_mbox(tp, off, val);
425         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
426             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
427                 tp->read32_mbox(tp, off);
428 }
429
430 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
431 {
432         void __iomem *mbox = tp->regs + off;
433         writel(val, mbox);
434         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
435                 writel(val, mbox);
436         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
437                 readl(mbox);
438 }
439
440 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
441 {
442         writel(val, tp->regs + off);
443 }
444
445 static u32 tg3_read32(struct tg3 *tp, u32 off)
446 {
447         return readl(tp->regs + off);
448 }
449
450 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
451 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
452 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
453 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
454 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
455
456 #define tw32(reg,val)           tp->write32(tp, reg, val)
457 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
458 #define tr32(reg)               tp->read32(tp, reg)
459
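/* NIC on-board SRAM is reached through a second config-space window:
 * TG3PCI_MEM_WIN_BASE_ADDR selects the address, TG3PCI_MEM_WIN_DATA moves
 * the data, and the base address is always put back to zero afterwards.
 */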
460 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
461 {
462         unsigned long flags;
463
464         spin_lock_irqsave(&tp->indirect_lock, flags);
465         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
466         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
467
468         /* Always leave this as zero. */
469         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
470         spin_unlock_irqrestore(&tp->indirect_lock, flags);
471 }
472
473 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
474 {
475         unsigned long flags;
476
477         spin_lock_irqsave(&tp->indirect_lock, flags);
478         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
479         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
480
481         /* Always leave this as zero. */
482         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
483         spin_unlock_irqrestore(&tp->indirect_lock, flags);
484 }
485
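/* Interrupt disable is two writes: mask the PCI interrupt in
 * MISC_HOST_CTRL, then write 1 to interrupt mailbox 0.  tg3_enable_ints()
 * reverses both and re-arms the mailbox with the last status tag.
 */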
486 static void tg3_disable_ints(struct tg3 *tp)
487 {
488         tw32(TG3PCI_MISC_HOST_CTRL,
489              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
490         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
491 }
492
493 static inline void tg3_cond_int(struct tg3 *tp)
494 {
495         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
496             (tp->hw_status->status & SD_STATUS_UPDATED))
497                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
498 }
499
500 static void tg3_enable_ints(struct tg3 *tp)
501 {
502         tp->irq_sync = 0;
503         wmb();
504
505         tw32(TG3PCI_MISC_HOST_CTRL,
506              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
507         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
508                        (tp->last_tag << 24));
509         tg3_cond_int(tp);
510 }
511
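/* Report whether the status block shows anything left to do: a link
 * change event (unless link changes are tracked by polling instead) or
 * tx/rx indices that have moved past what the driver has processed.
 */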
512 static inline unsigned int tg3_has_work(struct tg3 *tp)
513 {
514         struct tg3_hw_status *sblk = tp->hw_status;
515         unsigned int work_exists = 0;
516
517         /* check for phy events */
518         if (!(tp->tg3_flags &
519               (TG3_FLAG_USE_LINKCHG_REG |
520                TG3_FLAG_POLL_SERDES))) {
521                 if (sblk->status & SD_STATUS_LINK_CHG)
522                         work_exists = 1;
523         }
524         /* check for RX/TX work to do */
525         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
526             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
527                 work_exists = 1;
528
529         return work_exists;
530 }
531
532 /* tg3_restart_ints
533  *  similar to tg3_enable_ints, but it accurately determines whether there
534  *  is new work pending and can return without flushing the PIO write
535  *  which reenables interrupts.
536  */
537 static void tg3_restart_ints(struct tg3 *tp)
538 {
539         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
540                      tp->last_tag << 24);
541         mmiowb();
542
543         /* When doing tagged status, this work check is unnecessary.
544          * The last_tag we write above tells the chip which piece of
545          * work we've completed.
546          */
547         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
548             tg3_has_work(tp))
549                 tw32(HOSTCC_MODE, tp->coalesce_mode |
550                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
551 }
552
553 static inline void tg3_netif_stop(struct tg3 *tp)
554 {
555         tp->dev->trans_start = jiffies; /* prevent tx timeout */
556         netif_poll_disable(tp->dev);
557         netif_tx_disable(tp->dev);
558 }
559
560 static inline void tg3_netif_start(struct tg3 *tp)
561 {
562         netif_wake_queue(tp->dev);
563         /* NOTE: unconditional netif_wake_queue is only appropriate
564          * so long as all callers are assured to have free tx slots
565          * (such as after tg3_init_hw)
566          */
567         netif_poll_enable(tp->dev);
568         tp->hw_status->status |= SD_STATUS_UPDATED;
569         tg3_enable_ints(tp);
570 }
571
572 static void tg3_switch_clocks(struct tg3 *tp)
573 {
574         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
575         u32 orig_clock_ctrl;
576
577         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
578                 return;
579
580         orig_clock_ctrl = clock_ctrl;
581         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
582                        CLOCK_CTRL_CLKRUN_OENABLE |
583                        0x1f);
584         tp->pci_clock_ctrl = clock_ctrl;
585
586         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
587                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
588                         tw32_f(TG3PCI_CLOCK_CTRL,
589                                clock_ctrl | CLOCK_CTRL_625_CORE);
590                         udelay(40);
591                 }
592         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
593                 tw32_f(TG3PCI_CLOCK_CTRL,
594                      clock_ctrl |
595                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
596                 udelay(40);
597                 tw32_f(TG3PCI_CLOCK_CTRL,
598                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
599                 udelay(40);
600         }
601         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
602         udelay(40);
603 }
604
605 #define PHY_BUSY_LOOPS  5000
606
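/* MII management access: the PHY address, register number, command and
 * (for writes) data are packed into a single MAC_MI_COM frame, after which
 * MI_COM_BUSY is polled for up to PHY_BUSY_LOOPS iterations.  Hardware
 * auto-polling is paused for the duration and restored before returning.
 */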
607 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
608 {
609         u32 frame_val;
610         unsigned int loops;
611         int ret;
612
613         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
614                 tw32_f(MAC_MI_MODE,
615                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
616                 udelay(80);
617         }
618
619         *val = 0x0;
620
621         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
622                       MI_COM_PHY_ADDR_MASK);
623         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
624                       MI_COM_REG_ADDR_MASK);
625         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
626         
627         tw32_f(MAC_MI_COM, frame_val);
628
629         loops = PHY_BUSY_LOOPS;
630         while (loops != 0) {
631                 udelay(10);
632                 frame_val = tr32(MAC_MI_COM);
633
634                 if ((frame_val & MI_COM_BUSY) == 0) {
635                         udelay(5);
636                         frame_val = tr32(MAC_MI_COM);
637                         break;
638                 }
639                 loops -= 1;
640         }
641
642         ret = -EBUSY;
643         if (loops != 0) {
644                 *val = frame_val & MI_COM_DATA_MASK;
645                 ret = 0;
646         }
647
648         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
649                 tw32_f(MAC_MI_MODE, tp->mi_mode);
650                 udelay(80);
651         }
652
653         return ret;
654 }
655
656 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
657 {
658         u32 frame_val;
659         unsigned int loops;
660         int ret;
661
662         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
663                 tw32_f(MAC_MI_MODE,
664                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
665                 udelay(80);
666         }
667
668         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
669                       MI_COM_PHY_ADDR_MASK);
670         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
671                       MI_COM_REG_ADDR_MASK);
672         frame_val |= (val & MI_COM_DATA_MASK);
673         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
674         
675         tw32_f(MAC_MI_COM, frame_val);
676
677         loops = PHY_BUSY_LOOPS;
678         while (loops != 0) {
679                 udelay(10);
680                 frame_val = tr32(MAC_MI_COM);
681                 if ((frame_val & MI_COM_BUSY) == 0) {
682                         udelay(5);
683                         frame_val = tr32(MAC_MI_COM);
684                         break;
685                 }
686                 loops -= 1;
687         }
688
689         ret = -EBUSY;
690         if (loops != 0)
691                 ret = 0;
692
693         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
694                 tw32_f(MAC_MI_MODE, tp->mi_mode);
695                 udelay(80);
696         }
697
698         return ret;
699 }
700
701 static void tg3_phy_set_wirespeed(struct tg3 *tp)
702 {
703         u32 val;
704
705         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
706                 return;
707
708         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
709             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
710                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
711                              (val | (1 << 15) | (1 << 4)));
712 }
713
714 static int tg3_bmcr_reset(struct tg3 *tp)
715 {
716         u32 phy_control;
717         int limit, err;
718
719         /* OK, reset it, and poll the BMCR_RESET bit until it
720          * clears or we time out.
721          */
722         phy_control = BMCR_RESET;
723         err = tg3_writephy(tp, MII_BMCR, phy_control);
724         if (err != 0)
725                 return -EBUSY;
726
727         limit = 5000;
728         while (limit--) {
729                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
730                 if (err != 0)
731                         return -EBUSY;
732
733                 if ((phy_control & BMCR_RESET) == 0) {
734                         udelay(40);
735                         break;
736                 }
737                 udelay(10);
738         }
739         if (limit < 0)
740                 return -EBUSY;
741
742         return 0;
743 }
744
745 static int tg3_wait_macro_done(struct tg3 *tp)
746 {
747         int limit = 100;
748
749         while (limit--) {
750                 u32 tmp32;
751
752                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
753                         if ((tmp32 & 0x1000) == 0)
754                                 break;
755                 }
756         }
757         if (limit < 0)
758                 return -EBUSY;
759
760         return 0;
761 }
762
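/* Write the four DSP test patterns below, one per channel, then read them
 * back through the DSP read/write port.  A macro timeout asks the caller
 * for another PHY reset; a readback mismatch fails the test outright.
 */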
763 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
764 {
765         static const u32 test_pat[4][6] = {
766         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
767         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
768         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
769         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
770         };
771         int chan;
772
773         for (chan = 0; chan < 4; chan++) {
774                 int i;
775
776                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
777                              (chan * 0x2000) | 0x0200);
778                 tg3_writephy(tp, 0x16, 0x0002);
779
780                 for (i = 0; i < 6; i++)
781                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
782                                      test_pat[chan][i]);
783
784                 tg3_writephy(tp, 0x16, 0x0202);
785                 if (tg3_wait_macro_done(tp)) {
786                         *resetp = 1;
787                         return -EBUSY;
788                 }
789
790                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
791                              (chan * 0x2000) | 0x0200);
792                 tg3_writephy(tp, 0x16, 0x0082);
793                 if (tg3_wait_macro_done(tp)) {
794                         *resetp = 1;
795                         return -EBUSY;
796                 }
797
798                 tg3_writephy(tp, 0x16, 0x0802);
799                 if (tg3_wait_macro_done(tp)) {
800                         *resetp = 1;
801                         return -EBUSY;
802                 }
803
804                 for (i = 0; i < 6; i += 2) {
805                         u32 low, high;
806
807                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
808                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
809                             tg3_wait_macro_done(tp)) {
810                                 *resetp = 1;
811                                 return -EBUSY;
812                         }
813                         low &= 0x7fff;
814                         high &= 0x000f;
815                         if (low != test_pat[chan][i] ||
816                             high != test_pat[chan][i+1]) {
817                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
818                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
819                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
820
821                                 return -EBUSY;
822                         }
823                 }
824         }
825
826         return 0;
827 }
828
829 static int tg3_phy_reset_chanpat(struct tg3 *tp)
830 {
831         int chan;
832
833         for (chan = 0; chan < 4; chan++) {
834                 int i;
835
836                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
837                              (chan * 0x2000) | 0x0200);
838                 tg3_writephy(tp, 0x16, 0x0002);
839                 for (i = 0; i < 6; i++)
840                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
841                 tg3_writephy(tp, 0x16, 0x0202);
842                 if (tg3_wait_macro_done(tp))
843                         return -EBUSY;
844         }
845
846         return 0;
847 }
848
849 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
850 {
851         u32 reg32, phy9_orig;
852         int retries, do_phy_reset, err;
853
854         retries = 10;
855         do_phy_reset = 1;
856         do {
857                 if (do_phy_reset) {
858                         err = tg3_bmcr_reset(tp);
859                         if (err)
860                                 return err;
861                         do_phy_reset = 0;
862                 }
863
864                 /* Disable transmitter and interrupt.  */
865                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
866                         continue;
867
868                 reg32 |= 0x3000;
869                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
870
871                 /* Set full-duplex, 1000 mbps.  */
872                 tg3_writephy(tp, MII_BMCR,
873                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
874
875                 /* Set to master mode.  */
876                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
877                         continue;
878
879                 tg3_writephy(tp, MII_TG3_CTRL,
880                              (MII_TG3_CTRL_AS_MASTER |
881                               MII_TG3_CTRL_ENABLE_AS_MASTER));
882
883                 /* Enable SM_DSP_CLOCK and 6dB.  */
884                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
885
886                 /* Block the PHY control access.  */
887                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
888                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
889
890                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
891                 if (!err)
892                         break;
893         } while (--retries);
894
895         err = tg3_phy_reset_chanpat(tp);
896         if (err)
897                 return err;
898
899         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
900         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
901
902         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
903         tg3_writephy(tp, 0x16, 0x0000);
904
905         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
906             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
907                 /* Set Extended packet length bit for jumbo frames */
908                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
909         }
910         else {
911                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
912         }
913
914         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
915
916         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
917                 reg32 &= ~0x3000;
918                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
919         } else if (!err)
920                 err = -EBUSY;
921
922         return err;
923 }
924
925 /* This will reset the tigon3 PHY and then apply the
926  * chip-specific PHY workarounds that must follow a reset.
927  */
928 static int tg3_phy_reset(struct tg3 *tp)
929 {
930         u32 phy_status;
931         int err;
932
933         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
934         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
935         if (err != 0)
936                 return -EBUSY;
937
938         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
939             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
940             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
941                 err = tg3_phy_reset_5703_4_5(tp);
942                 if (err)
943                         return err;
944                 goto out;
945         }
946
947         err = tg3_bmcr_reset(tp);
948         if (err)
949                 return err;
950
951 out:
952         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
953                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
954                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
955                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
956                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
957                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
958                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
959         }
960         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
961                 tg3_writephy(tp, 0x1c, 0x8d68);
962                 tg3_writephy(tp, 0x1c, 0x8d68);
963         }
964         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
965                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
966                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
967                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
968                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
969                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
970                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
971                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
972                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
973         }
974         /* Set Extended packet length bit (bit 14) on all chips that
975          * support jumbo frames. */
976         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
977                 /* Cannot do read-modify-write on 5401 */
978                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
979         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
980                 u32 phy_reg;
981
982                 /* Set bit 14 with read-modify-write to preserve other bits */
983                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
984                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
985                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
986         }
987
988         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
989          * jumbo frames transmission.
990          */
991         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
992                 u32 phy_reg;
993
994                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
995                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
996                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
997         }
998
999         tg3_phy_set_wirespeed(tp);
1000         return 0;
1001 }
1002
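/* Drive the GRC local-control GPIOs that switch the board's auxiliary
 * power.  On 5704 both ports share these GPIOs, so the peer device's WOL
 * and init state is consulted before anything is changed.
 */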
1003 static void tg3_frob_aux_power(struct tg3 *tp)
1004 {
1005         struct tg3 *tp_peer = tp;
1006
1007         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1008                 return;
1009
1010         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1011                 tp_peer = pci_get_drvdata(tp->pdev_peer);
1012                 if (!tp_peer)
1013                         BUG();
1014         }
1015
1016
1017         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1018             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
1019                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1020                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1021                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1022                              (GRC_LCLCTRL_GPIO_OE0 |
1023                               GRC_LCLCTRL_GPIO_OE1 |
1024                               GRC_LCLCTRL_GPIO_OE2 |
1025                               GRC_LCLCTRL_GPIO_OUTPUT0 |
1026                               GRC_LCLCTRL_GPIO_OUTPUT1));
1027                         udelay(100);
1028                 } else {
1029                         u32 no_gpio2;
1030                         u32 grc_local_ctrl;
1031
1032                         if (tp_peer != tp &&
1033                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1034                                 return;
1035
1036                         /* On 5753 and variants, GPIO2 cannot be used. */
1037                         no_gpio2 = tp->nic_sram_data_cfg &
1038                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1039
1040                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1041                                          GRC_LCLCTRL_GPIO_OE1 |
1042                                          GRC_LCLCTRL_GPIO_OE2 |
1043                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1044                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1045                         if (no_gpio2) {
1046                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1047                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1048                         }
1049                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1050                                                 grc_local_ctrl);
1051                         udelay(100);
1052
1053                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1054
1055                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1056                                                 grc_local_ctrl);
1057                         udelay(100);
1058
1059                         if (!no_gpio2) {
1060                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1061                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1062                                        grc_local_ctrl);
1063                                 udelay(100);
1064                         }
1065                 }
1066         } else {
1067                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1068                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1069                         if (tp_peer != tp &&
1070                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1071                                 return;
1072
1073                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1074                              (GRC_LCLCTRL_GPIO_OE1 |
1075                               GRC_LCLCTRL_GPIO_OUTPUT1));
1076                         udelay(100);
1077
1078                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1079                              (GRC_LCLCTRL_GPIO_OE1));
1080                         udelay(100);
1081
1082                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1083                              (GRC_LCLCTRL_GPIO_OE1 |
1084                               GRC_LCLCTRL_GPIO_OUTPUT1));
1085                         udelay(100);
1086                 }
1087         }
1088 }
1089
1090 static int tg3_setup_phy(struct tg3 *, int);
1091
1092 #define RESET_KIND_SHUTDOWN     0
1093 #define RESET_KIND_INIT         1
1094 #define RESET_KIND_SUSPEND      2
1095
1096 static void tg3_write_sig_post_reset(struct tg3 *, int);
1097 static int tg3_halt_cpu(struct tg3 *, u32);
1098
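/* Move the chip to the PCI power state requested by 'state' (0 == D0),
 * programming WOL, clock and GPIO state along the way.  The D0 case is
 * handled early: clear the power-state bits and, for non-LOM boards,
 * switch back off auxiliary power.
 */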
1099 static int tg3_set_power_state(struct tg3 *tp, int state)
1100 {
1101         u32 misc_host_ctrl;
1102         u16 power_control, power_caps;
1103         int pm = tp->pm_cap;
1104
1105         /* Make sure register accesses (indirect or otherwise)
1106          * will function correctly.
1107          */
1108         pci_write_config_dword(tp->pdev,
1109                                TG3PCI_MISC_HOST_CTRL,
1110                                tp->misc_host_ctrl);
1111
1112         pci_read_config_word(tp->pdev,
1113                              pm + PCI_PM_CTRL,
1114                              &power_control);
1115         power_control |= PCI_PM_CTRL_PME_STATUS;
1116         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1117         switch (state) {
1118         case 0:
1119                 power_control |= 0;
1120                 pci_write_config_word(tp->pdev,
1121                                       pm + PCI_PM_CTRL,
1122                                       power_control);
1123                 udelay(100);    /* Delay after power state change */
1124
1125                 /* Switch out of Vaux if it is not a LOM */
1126                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1127                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1128                         udelay(100);
1129                 }
1130
1131                 return 0;
1132
1133         case 1:
1134                 power_control |= 1;
1135                 break;
1136
1137         case 2:
1138                 power_control |= 2;
1139                 break;
1140
1141         case 3:
1142                 power_control |= 3;
1143                 break;
1144
1145         default:
1146                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1147                        "requested.\n",
1148                        tp->dev->name, state);
1149                 return -EINVAL;
1150         }
1151
1152         power_control |= PCI_PM_CTRL_PME_ENABLE;
1153
1154         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1155         tw32(TG3PCI_MISC_HOST_CTRL,
1156              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1157
1158         if (tp->link_config.phy_is_low_power == 0) {
1159                 tp->link_config.phy_is_low_power = 1;
1160                 tp->link_config.orig_speed = tp->link_config.speed;
1161                 tp->link_config.orig_duplex = tp->link_config.duplex;
1162                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1163         }
1164
1165         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1166                 tp->link_config.speed = SPEED_10;
1167                 tp->link_config.duplex = DUPLEX_HALF;
1168                 tp->link_config.autoneg = AUTONEG_ENABLE;
1169                 tg3_setup_phy(tp, 0);
1170         }
1171
1172         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1173
1174         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1175                 u32 mac_mode;
1176
1177                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1178                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1179                         udelay(40);
1180
1181                         mac_mode = MAC_MODE_PORT_MODE_MII;
1182
1183                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1184                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1185                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1186                 } else {
1187                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1188                 }
1189
1190                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1191                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1192
1193                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1194                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1195                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1196
1197                 tw32_f(MAC_MODE, mac_mode);
1198                 udelay(100);
1199
1200                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1201                 udelay(10);
1202         }
1203
1204         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1205             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1206              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1207                 u32 base_val;
1208
1209                 base_val = tp->pci_clock_ctrl;
1210                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1211                              CLOCK_CTRL_TXCLK_DISABLE);
1212
1213                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1214                      CLOCK_CTRL_ALTCLK |
1215                      CLOCK_CTRL_PWRDOWN_PLL133);
1216                 udelay(40);
1217         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1218                 /* do nothing */
1219         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1220                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1221                 u32 newbits1, newbits2;
1222
1223                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1224                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1225                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1226                                     CLOCK_CTRL_TXCLK_DISABLE |
1227                                     CLOCK_CTRL_ALTCLK);
1228                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1229                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1230                         newbits1 = CLOCK_CTRL_625_CORE;
1231                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1232                 } else {
1233                         newbits1 = CLOCK_CTRL_ALTCLK;
1234                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1235                 }
1236
1237                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1238                 udelay(40);
1239
1240                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1241                 udelay(40);
1242
1243                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1244                         u32 newbits3;
1245
1246                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1247                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1248                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1249                                             CLOCK_CTRL_TXCLK_DISABLE |
1250                                             CLOCK_CTRL_44MHZ_CORE);
1251                         } else {
1252                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1253                         }
1254
1255                         tw32_f(TG3PCI_CLOCK_CTRL,
1256                                          tp->pci_clock_ctrl | newbits3);
1257                         udelay(40);
1258                 }
1259         }
1260
1261         tg3_frob_aux_power(tp);
1262
1263         /* Workaround for unstable PLL clock */
1264         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1265             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1266                 u32 val = tr32(0x7d00);
1267
1268                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1269                 tw32(0x7d00, val);
1270                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1271                         tg3_halt_cpu(tp, RX_CPU_BASE);
1272         }
1273
1274         /* Finally, set the new power state. */
1275         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1276         udelay(100);    /* Delay after power state change */
1277
1278         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1279
1280         return 0;
1281 }
1282
1283 static void tg3_link_report(struct tg3 *tp)
1284 {
1285         if (!netif_carrier_ok(tp->dev)) {
1286                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1287         } else {
1288                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1289                        tp->dev->name,
1290                        (tp->link_config.active_speed == SPEED_1000 ?
1291                         1000 :
1292                         (tp->link_config.active_speed == SPEED_100 ?
1293                          100 : 10)),
1294                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1295                         "full" : "half"));
1296
1297                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1298                        "%s for RX.\n",
1299                        tp->dev->name,
1300                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1301                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1302         }
1303 }
1304
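/* Resolve 802.3x flow control from the local and remote autoneg
 * advertisements (mapping 1000BASE-X pause bits onto their 1000BASE-T
 * equivalents first for MII serdes), then rewrite MAC_RX_MODE and
 * MAC_TX_MODE only when the resolved pause settings actually changed.
 */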
1305 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1306 {
1307         u32 new_tg3_flags = 0;
1308         u32 old_rx_mode = tp->rx_mode;
1309         u32 old_tx_mode = tp->tx_mode;
1310
1311         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1312
1313                 /* Convert 1000BaseX flow control bits to 1000BaseT
1314                  * bits before resolving flow control.
1315                  */
1316                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1317                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1318                                        ADVERTISE_PAUSE_ASYM);
1319                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1320
1321                         if (local_adv & ADVERTISE_1000XPAUSE)
1322                                 local_adv |= ADVERTISE_PAUSE_CAP;
1323                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1324                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1325                         if (remote_adv & LPA_1000XPAUSE)
1326                                 remote_adv |= LPA_PAUSE_CAP;
1327                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1328                                 remote_adv |= LPA_PAUSE_ASYM;
1329                 }
1330
1331                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1332                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1333                                 if (remote_adv & LPA_PAUSE_CAP)
1334                                         new_tg3_flags |=
1335                                                 (TG3_FLAG_RX_PAUSE |
1336                                                 TG3_FLAG_TX_PAUSE);
1337                                 else if (remote_adv & LPA_PAUSE_ASYM)
1338                                         new_tg3_flags |=
1339                                                 (TG3_FLAG_RX_PAUSE);
1340                         } else {
1341                                 if (remote_adv & LPA_PAUSE_CAP)
1342                                         new_tg3_flags |=
1343                                                 (TG3_FLAG_RX_PAUSE |
1344                                                 TG3_FLAG_TX_PAUSE);
1345                         }
1346                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1347                         if ((remote_adv & LPA_PAUSE_CAP) &&
1348                         (remote_adv & LPA_PAUSE_ASYM))
1349                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1350                 }
1351
1352                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1353                 tp->tg3_flags |= new_tg3_flags;
1354         } else {
1355                 new_tg3_flags = tp->tg3_flags;
1356         }
1357
1358         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1359                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1360         else
1361                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1362
1363         if (old_rx_mode != tp->rx_mode) {
1364                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1365         }
1366         
1367         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1368                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1369         else
1370                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1371
1372         if (old_tx_mode != tp->tx_mode) {
1373                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1374         }
1375 }
1376
1377 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1378 {
1379         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1380         case MII_TG3_AUX_STAT_10HALF:
1381                 *speed = SPEED_10;
1382                 *duplex = DUPLEX_HALF;
1383                 break;
1384
1385         case MII_TG3_AUX_STAT_10FULL:
1386                 *speed = SPEED_10;
1387                 *duplex = DUPLEX_FULL;
1388                 break;
1389
1390         case MII_TG3_AUX_STAT_100HALF:
1391                 *speed = SPEED_100;
1392                 *duplex = DUPLEX_HALF;
1393                 break;
1394
1395         case MII_TG3_AUX_STAT_100FULL:
1396                 *speed = SPEED_100;
1397                 *duplex = DUPLEX_FULL;
1398                 break;
1399
1400         case MII_TG3_AUX_STAT_1000HALF:
1401                 *speed = SPEED_1000;
1402                 *duplex = DUPLEX_HALF;
1403                 break;
1404
1405         case MII_TG3_AUX_STAT_1000FULL:
1406                 *speed = SPEED_1000;
1407                 *duplex = DUPLEX_FULL;
1408                 break;
1409
1410         default:
1411                 *speed = SPEED_INVALID;
1412                 *duplex = DUPLEX_INVALID;
1413                 break;
1414         }
1415 }
1416
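/* Program the copper PHY advertisement/control registers from
 * tp->link_config.  In low power mode only 10Mb (plus 100Mb when
 * WOL_SPEED_100MB is set) is advertised; when a specific mode is
 * forced it is programmed directly into BMCR, otherwise autoneg is
 * (re)started.
 */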
1417 static void tg3_phy_copper_begin(struct tg3 *tp)
1418 {
1419         u32 new_adv;
1420         int i;
1421
1422         if (tp->link_config.phy_is_low_power) {
1423                 /* Entering low power mode.  Disable gigabit and
1424                  * 100baseT advertisements.
1425                  */
1426                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1427
1428                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1429                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1430                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1431                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1432
1433                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1434         } else if (tp->link_config.speed == SPEED_INVALID) {
1435                 tp->link_config.advertising =
1436                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1437                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1438                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1439                          ADVERTISED_Autoneg | ADVERTISED_MII);
1440
1441                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1442                         tp->link_config.advertising &=
1443                                 ~(ADVERTISED_1000baseT_Half |
1444                                   ADVERTISED_1000baseT_Full);
1445
1446                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1447                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1448                         new_adv |= ADVERTISE_10HALF;
1449                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1450                         new_adv |= ADVERTISE_10FULL;
1451                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1452                         new_adv |= ADVERTISE_100HALF;
1453                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1454                         new_adv |= ADVERTISE_100FULL;
1455                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1456
1457                 if (tp->link_config.advertising &
1458                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1459                         new_adv = 0;
1460                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1461                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1462                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1463                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1464                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1465                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1466                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1467                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1468                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1469                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1470                 } else {
1471                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1472                 }
1473         } else {
1474                 /* Asking for a specific link mode. */
1475                 if (tp->link_config.speed == SPEED_1000) {
1476                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1477                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1478
1479                         if (tp->link_config.duplex == DUPLEX_FULL)
1480                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1481                         else
1482                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1483                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1484                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1485                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1486                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1487                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1488                 } else {
1489                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1490
1491                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1492                         if (tp->link_config.speed == SPEED_100) {
1493                                 if (tp->link_config.duplex == DUPLEX_FULL)
1494                                         new_adv |= ADVERTISE_100FULL;
1495                                 else
1496                                         new_adv |= ADVERTISE_100HALF;
1497                         } else {
1498                                 if (tp->link_config.duplex == DUPLEX_FULL)
1499                                         new_adv |= ADVERTISE_10FULL;
1500                                 else
1501                                         new_adv |= ADVERTISE_10HALF;
1502                         }
1503                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1504                 }
1505         }
1506
1507         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1508             tp->link_config.speed != SPEED_INVALID) {
1509                 u32 bmcr, orig_bmcr;
1510
1511                 tp->link_config.active_speed = tp->link_config.speed;
1512                 tp->link_config.active_duplex = tp->link_config.duplex;
1513
1514                 bmcr = 0;
1515                 switch (tp->link_config.speed) {
1516                 default:
1517                 case SPEED_10:
1518                         break;
1519
1520                 case SPEED_100:
1521                         bmcr |= BMCR_SPEED100;
1522                         break;
1523
1524                 case SPEED_1000:
1525                         bmcr |= TG3_BMCR_SPEED1000;
1526                         break;
1527         }
1528
1529                 if (tp->link_config.duplex == DUPLEX_FULL)
1530                         bmcr |= BMCR_FULLDPLX;
1531
1532                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1533                     (bmcr != orig_bmcr)) {
1534                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1535                         for (i = 0; i < 1500; i++) {
1536                                 u32 tmp;
1537
1538                                 udelay(10);
1539                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1540                                     tg3_readphy(tp, MII_BMSR, &tmp))
1541                                         continue;
1542                                 if (!(tmp & BMSR_LSTATUS)) {
1543                                         udelay(40);
1544                                         break;
1545                                 }
1546                         }
1547                         tg3_writephy(tp, MII_BMCR, bmcr);
1548                         udelay(40);
1549                 }
1550         } else {
1551                 tg3_writephy(tp, MII_BMCR,
1552                              BMCR_ANENABLE | BMCR_ANRESTART);
1553         }
1554 }
1555
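/* BCM5401 DSP setup: turn off tap power management, set the extended
 * packet length bit and load the DSP coefficients through the
 * address/read-write port registers.  Returns non-zero if any PHY
 * write failed.
 */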
1556 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1557 {
1558         int err;
1559
1560         /* Turn off tap power management. */
1561         /* Set Extended packet length bit */
1562         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1563
1564         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1565         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1566
1567         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1568         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1569
1570         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1571         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1572
1573         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1574         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1575
1576         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1577         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1578
1579         udelay(40);
1580
1581         return err;
1582 }
1583
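/* Return 1 if the PHY is advertising all of the 10/100 (and, unless
 * the chip is 10/100 only, 1000) modes, 0 otherwise or on a PHY read
 * failure.
 */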
1584 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1585 {
1586         u32 adv_reg, all_mask;
1587
1588         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1589                 return 0;
1590
1591         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1592                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1593         if ((adv_reg & all_mask) != all_mask)
1594                 return 0;
1595         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1596                 u32 tg3_ctrl;
1597
1598                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1599                         return 0;
1600
1601                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1602                             MII_TG3_CTRL_ADV_1000_FULL);
1603                 if ((tg3_ctrl & all_mask) != all_mask)
1604                         return 0;
1605         }
1606         return 1;
1607 }
1608
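/* Link setup for copper PHYs: clear pending MAC/PHY events, apply
 * chip specific PHY workarounds, poll BMSR for link, derive the
 * active speed/duplex and flow control settings, then program
 * MAC_MODE and report any carrier change.
 */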
1609 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1610 {
1611         int current_link_up;
1612         u32 bmsr, dummy;
1613         u16 current_speed;
1614         u8 current_duplex;
1615         int i, err;
1616
1617         tw32(MAC_EVENT, 0);
1618
1619         tw32_f(MAC_STATUS,
1620              (MAC_STATUS_SYNC_CHANGED |
1621               MAC_STATUS_CFG_CHANGED |
1622               MAC_STATUS_MI_COMPLETION |
1623               MAC_STATUS_LNKSTATE_CHANGED));
1624         udelay(40);
1625
1626         tp->mi_mode = MAC_MI_MODE_BASE;
1627         tw32_f(MAC_MI_MODE, tp->mi_mode);
1628         udelay(80);
1629
1630         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1631
1632         /* Some third-party PHYs need to be reset on link going
1633          * down.
1634          */
1635         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1636              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1637              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1638             netif_carrier_ok(tp->dev)) {
1639                 tg3_readphy(tp, MII_BMSR, &bmsr);
1640                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1641                     !(bmsr & BMSR_LSTATUS))
1642                         force_reset = 1;
1643         }
1644         if (force_reset)
1645                 tg3_phy_reset(tp);
1646
1647         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1648                 tg3_readphy(tp, MII_BMSR, &bmsr);
1649                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1650                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1651                         bmsr = 0;
1652
1653                 if (!(bmsr & BMSR_LSTATUS)) {
1654                         err = tg3_init_5401phy_dsp(tp);
1655                         if (err)
1656                                 return err;
1657
1658                         tg3_readphy(tp, MII_BMSR, &bmsr);
1659                         for (i = 0; i < 1000; i++) {
1660                                 udelay(10);
1661                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1662                                     (bmsr & BMSR_LSTATUS)) {
1663                                         udelay(40);
1664                                         break;
1665                                 }
1666                         }
1667
1668                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1669                             !(bmsr & BMSR_LSTATUS) &&
1670                             tp->link_config.active_speed == SPEED_1000) {
1671                                 err = tg3_phy_reset(tp);
1672                                 if (!err)
1673                                         err = tg3_init_5401phy_dsp(tp);
1674                                 if (err)
1675                                         return err;
1676                         }
1677                 }
1678         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1679                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1680                 /* 5701 {A0,B0} CRC bug workaround */
1681                 tg3_writephy(tp, 0x15, 0x0a75);
1682                 tg3_writephy(tp, 0x1c, 0x8c68);
1683                 tg3_writephy(tp, 0x1c, 0x8d68);
1684                 tg3_writephy(tp, 0x1c, 0x8c68);
1685         }
1686
1687         /* Clear pending interrupts... */
1688         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1689         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1690
1691         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1692                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1693         else
1694                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1695
1696         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1697             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1698                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1699                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1700                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1701                 else
1702                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1703         }
1704
1705         current_link_up = 0;
1706         current_speed = SPEED_INVALID;
1707         current_duplex = DUPLEX_INVALID;
1708
1709         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1710                 u32 val;
1711
1712                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1713                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1714                 if (!(val & (1 << 10))) {
1715                         val |= (1 << 10);
1716                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1717                         goto relink;
1718                 }
1719         }
1720
1721         bmsr = 0;
1722         for (i = 0; i < 100; i++) {
1723                 tg3_readphy(tp, MII_BMSR, &bmsr);
1724                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1725                     (bmsr & BMSR_LSTATUS))
1726                         break;
1727                 udelay(40);
1728         }
1729
1730         if (bmsr & BMSR_LSTATUS) {
1731                 u32 aux_stat, bmcr;
1732
1733                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1734                 for (i = 0; i < 2000; i++) {
1735                         udelay(10);
1736                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1737                             aux_stat)
1738                                 break;
1739                 }
1740
1741                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1742                                              &current_speed,
1743                                              &current_duplex);
1744
1745                 bmcr = 0;
1746                 for (i = 0; i < 200; i++) {
1747                         tg3_readphy(tp, MII_BMCR, &bmcr);
1748                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1749                                 continue;
1750                         if (bmcr && bmcr != 0x7fff)
1751                                 break;
1752                         udelay(10);
1753                 }
1754
1755                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1756                         if (bmcr & BMCR_ANENABLE) {
1757                                 current_link_up = 1;
1758
1759                                 /* Force autoneg restart if we are exiting
1760                                  * low power mode.
1761                                  */
1762                                 if (!tg3_copper_is_advertising_all(tp))
1763                                         current_link_up = 0;
1764                         } else {
1765                                 current_link_up = 0;
1766                         }
1767                 } else {
1768                         if (!(bmcr & BMCR_ANENABLE) &&
1769                             tp->link_config.speed == current_speed &&
1770                             tp->link_config.duplex == current_duplex) {
1771                                 current_link_up = 1;
1772                         } else {
1773                                 current_link_up = 0;
1774                         }
1775                 }
1776
1777                 tp->link_config.active_speed = current_speed;
1778                 tp->link_config.active_duplex = current_duplex;
1779         }
1780
1781         if (current_link_up == 1 &&
1782             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1783             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1784                 u32 local_adv, remote_adv;
1785
1786                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1787                         local_adv = 0;
1788                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1789
1790                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1791                         remote_adv = 0;
1792
1793                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1794
1795                 /* If we are not advertising full pause capability,
1796                  * something is wrong.  Bring the link down and reconfigure.
1797                  */
1798                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1799                         current_link_up = 0;
1800                 } else {
1801                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1802                 }
1803         }
1804 relink:
1805         if (current_link_up == 0) {
1806                 u32 tmp;
1807
1808                 tg3_phy_copper_begin(tp);
1809
1810                 tg3_readphy(tp, MII_BMSR, &tmp);
1811                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1812                     (tmp & BMSR_LSTATUS))
1813                         current_link_up = 1;
1814         }
1815
1816         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1817         if (current_link_up == 1) {
1818                 if (tp->link_config.active_speed == SPEED_100 ||
1819                     tp->link_config.active_speed == SPEED_10)
1820                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1821                 else
1822                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1823         } else
1824                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1825
1826         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1827         if (tp->link_config.active_duplex == DUPLEX_HALF)
1828                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1829
1830         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1831         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1832                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1833                     (current_link_up == 1 &&
1834                      tp->link_config.active_speed == SPEED_10))
1835                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1836         } else {
1837                 if (current_link_up == 1)
1838                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1839         }
1840
1841         /* ??? Without this setting Netgear GA302T PHY does not
1842          * ??? send/receive packets...
1843          */
1844         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1845             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1846                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1847                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1848                 udelay(80);
1849         }
1850
1851         tw32_f(MAC_MODE, tp->mac_mode);
1852         udelay(40);
1853
1854         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1855                 /* Polled via timer. */
1856                 tw32_f(MAC_EVENT, 0);
1857         } else {
1858                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1859         }
1860         udelay(40);
1861
1862         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1863             current_link_up == 1 &&
1864             tp->link_config.active_speed == SPEED_1000 &&
1865             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1866              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1867                 udelay(120);
1868                 tw32_f(MAC_STATUS,
1869                      (MAC_STATUS_SYNC_CHANGED |
1870                       MAC_STATUS_CFG_CHANGED));
1871                 udelay(40);
1872                 tg3_write_mem(tp,
1873                               NIC_SRAM_FIRMWARE_MBOX,
1874                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1875         }
1876
1877         if (current_link_up != netif_carrier_ok(tp->dev)) {
1878                 if (current_link_up)
1879                         netif_carrier_on(tp->dev);
1880                 else
1881                         netif_carrier_off(tp->dev);
1882                 tg3_link_report(tp);
1883         }
1884
1885         return 0;
1886 }
1887
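/* Software state for the 1000BASE-X style auto-negotiation state
 * machine used on fiber (TBI) links.
 */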
1888 struct tg3_fiber_aneginfo {
1889         int state;
1890 #define ANEG_STATE_UNKNOWN              0
1891 #define ANEG_STATE_AN_ENABLE            1
1892 #define ANEG_STATE_RESTART_INIT         2
1893 #define ANEG_STATE_RESTART              3
1894 #define ANEG_STATE_DISABLE_LINK_OK      4
1895 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1896 #define ANEG_STATE_ABILITY_DETECT       6
1897 #define ANEG_STATE_ACK_DETECT_INIT      7
1898 #define ANEG_STATE_ACK_DETECT           8
1899 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1900 #define ANEG_STATE_COMPLETE_ACK         10
1901 #define ANEG_STATE_IDLE_DETECT_INIT     11
1902 #define ANEG_STATE_IDLE_DETECT          12
1903 #define ANEG_STATE_LINK_OK              13
1904 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1905 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1906
1907         u32 flags;
1908 #define MR_AN_ENABLE            0x00000001
1909 #define MR_RESTART_AN           0x00000002
1910 #define MR_AN_COMPLETE          0x00000004
1911 #define MR_PAGE_RX              0x00000008
1912 #define MR_NP_LOADED            0x00000010
1913 #define MR_TOGGLE_TX            0x00000020
1914 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1915 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1916 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1917 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1918 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1919 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1920 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1921 #define MR_TOGGLE_RX            0x00002000
1922 #define MR_NP_RX                0x00004000
1923
1924 #define MR_LINK_OK              0x80000000
1925
1926         unsigned long link_time, cur_time;
1927
1928         u32 ability_match_cfg;
1929         int ability_match_count;
1930
1931         char ability_match, idle_match, ack_match;
1932
1933         u32 txconfig, rxconfig;
1934 #define ANEG_CFG_NP             0x00000080
1935 #define ANEG_CFG_ACK            0x00000040
1936 #define ANEG_CFG_RF2            0x00000020
1937 #define ANEG_CFG_RF1            0x00000010
1938 #define ANEG_CFG_PS2            0x00000001
1939 #define ANEG_CFG_PS1            0x00008000
1940 #define ANEG_CFG_HD             0x00004000
1941 #define ANEG_CFG_FD             0x00002000
1942 #define ANEG_CFG_INVAL          0x00001f06
1943
1944 };
1945 #define ANEG_OK         0
1946 #define ANEG_DONE       1
1947 #define ANEG_TIMER_ENAB 2
1948 #define ANEG_FAILED     -1
1949
1950 #define ANEG_STATE_SETTLE_TIME  10000
1951
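/* Run one step of the fiber auto-negotiation state machine.  Returns
 * ANEG_OK, ANEG_TIMER_ENAB, ANEG_DONE or ANEG_FAILED; the caller
 * keeps stepping until negotiation completes or fails.
 */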
1952 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1953                                    struct tg3_fiber_aneginfo *ap)
1954 {
1955         unsigned long delta;
1956         u32 rx_cfg_reg;
1957         int ret;
1958
1959         if (ap->state == ANEG_STATE_UNKNOWN) {
1960                 ap->rxconfig = 0;
1961                 ap->link_time = 0;
1962                 ap->cur_time = 0;
1963                 ap->ability_match_cfg = 0;
1964                 ap->ability_match_count = 0;
1965                 ap->ability_match = 0;
1966                 ap->idle_match = 0;
1967                 ap->ack_match = 0;
1968         }
1969         ap->cur_time++;
1970
1971         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1972                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1973
1974                 if (rx_cfg_reg != ap->ability_match_cfg) {
1975                         ap->ability_match_cfg = rx_cfg_reg;
1976                         ap->ability_match = 0;
1977                         ap->ability_match_count = 0;
1978                 } else {
1979                         if (++ap->ability_match_count > 1) {
1980                                 ap->ability_match = 1;
1981                                 ap->ability_match_cfg = rx_cfg_reg;
1982                         }
1983                 }
1984                 if (rx_cfg_reg & ANEG_CFG_ACK)
1985                         ap->ack_match = 1;
1986                 else
1987                         ap->ack_match = 0;
1988
1989                 ap->idle_match = 0;
1990         } else {
1991                 ap->idle_match = 1;
1992                 ap->ability_match_cfg = 0;
1993                 ap->ability_match_count = 0;
1994                 ap->ability_match = 0;
1995                 ap->ack_match = 0;
1996
1997                 rx_cfg_reg = 0;
1998         }
1999
2000         ap->rxconfig = rx_cfg_reg;
2001         ret = ANEG_OK;
2002
2003         switch(ap->state) {
2004         case ANEG_STATE_UNKNOWN:
2005                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2006                         ap->state = ANEG_STATE_AN_ENABLE;
2007
2008                 /* fallthru */
2009         case ANEG_STATE_AN_ENABLE:
2010                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2011                 if (ap->flags & MR_AN_ENABLE) {
2012                         ap->link_time = 0;
2013                         ap->cur_time = 0;
2014                         ap->ability_match_cfg = 0;
2015                         ap->ability_match_count = 0;
2016                         ap->ability_match = 0;
2017                         ap->idle_match = 0;
2018                         ap->ack_match = 0;
2019
2020                         ap->state = ANEG_STATE_RESTART_INIT;
2021                 } else {
2022                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2023                 }
2024                 break;
2025
2026         case ANEG_STATE_RESTART_INIT:
2027                 ap->link_time = ap->cur_time;
2028                 ap->flags &= ~(MR_NP_LOADED);
2029                 ap->txconfig = 0;
2030                 tw32(MAC_TX_AUTO_NEG, 0);
2031                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2032                 tw32_f(MAC_MODE, tp->mac_mode);
2033                 udelay(40);
2034
2035                 ret = ANEG_TIMER_ENAB;
2036                 ap->state = ANEG_STATE_RESTART;
2037
2038                 /* fallthru */
2039         case ANEG_STATE_RESTART:
2040                 delta = ap->cur_time - ap->link_time;
2041                 if (delta > ANEG_STATE_SETTLE_TIME) {
2042                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2043                 } else {
2044                         ret = ANEG_TIMER_ENAB;
2045                 }
2046                 break;
2047
2048         case ANEG_STATE_DISABLE_LINK_OK:
2049                 ret = ANEG_DONE;
2050                 break;
2051
2052         case ANEG_STATE_ABILITY_DETECT_INIT:
2053                 ap->flags &= ~(MR_TOGGLE_TX);
2054                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2055                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2056                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2057                 tw32_f(MAC_MODE, tp->mac_mode);
2058                 udelay(40);
2059
2060                 ap->state = ANEG_STATE_ABILITY_DETECT;
2061                 break;
2062
2063         case ANEG_STATE_ABILITY_DETECT:
2064                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2065                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2066                 }
2067                 break;
2068
2069         case ANEG_STATE_ACK_DETECT_INIT:
2070                 ap->txconfig |= ANEG_CFG_ACK;
2071                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2072                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2073                 tw32_f(MAC_MODE, tp->mac_mode);
2074                 udelay(40);
2075
2076                 ap->state = ANEG_STATE_ACK_DETECT;
2077
2078                 /* fallthru */
2079         case ANEG_STATE_ACK_DETECT:
2080                 if (ap->ack_match != 0) {
2081                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2082                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2083                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2084                         } else {
2085                                 ap->state = ANEG_STATE_AN_ENABLE;
2086                         }
2087                 } else if (ap->ability_match != 0 &&
2088                            ap->rxconfig == 0) {
2089                         ap->state = ANEG_STATE_AN_ENABLE;
2090                 }
2091                 break;
2092
2093         case ANEG_STATE_COMPLETE_ACK_INIT:
2094                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2095                         ret = ANEG_FAILED;
2096                         break;
2097                 }
2098                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2099                                MR_LP_ADV_HALF_DUPLEX |
2100                                MR_LP_ADV_SYM_PAUSE |
2101                                MR_LP_ADV_ASYM_PAUSE |
2102                                MR_LP_ADV_REMOTE_FAULT1 |
2103                                MR_LP_ADV_REMOTE_FAULT2 |
2104                                MR_LP_ADV_NEXT_PAGE |
2105                                MR_TOGGLE_RX |
2106                                MR_NP_RX);
2107                 if (ap->rxconfig & ANEG_CFG_FD)
2108                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2109                 if (ap->rxconfig & ANEG_CFG_HD)
2110                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2111                 if (ap->rxconfig & ANEG_CFG_PS1)
2112                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2113                 if (ap->rxconfig & ANEG_CFG_PS2)
2114                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2115                 if (ap->rxconfig & ANEG_CFG_RF1)
2116                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2117                 if (ap->rxconfig & ANEG_CFG_RF2)
2118                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2119                 if (ap->rxconfig & ANEG_CFG_NP)
2120                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2121
2122                 ap->link_time = ap->cur_time;
2123
2124                 ap->flags ^= (MR_TOGGLE_TX);
2125                 if (ap->rxconfig & 0x0008)
2126                         ap->flags |= MR_TOGGLE_RX;
2127                 if (ap->rxconfig & ANEG_CFG_NP)
2128                         ap->flags |= MR_NP_RX;
2129                 ap->flags |= MR_PAGE_RX;
2130
2131                 ap->state = ANEG_STATE_COMPLETE_ACK;
2132                 ret = ANEG_TIMER_ENAB;
2133                 break;
2134
2135         case ANEG_STATE_COMPLETE_ACK:
2136                 if (ap->ability_match != 0 &&
2137                     ap->rxconfig == 0) {
2138                         ap->state = ANEG_STATE_AN_ENABLE;
2139                         break;
2140                 }
2141                 delta = ap->cur_time - ap->link_time;
2142                 if (delta > ANEG_STATE_SETTLE_TIME) {
2143                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2144                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2145                         } else {
2146                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2147                                     !(ap->flags & MR_NP_RX)) {
2148                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2149                                 } else {
2150                                         ret = ANEG_FAILED;
2151                                 }
2152                         }
2153                 }
2154                 break;
2155
2156         case ANEG_STATE_IDLE_DETECT_INIT:
2157                 ap->link_time = ap->cur_time;
2158                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2159                 tw32_f(MAC_MODE, tp->mac_mode);
2160                 udelay(40);
2161
2162                 ap->state = ANEG_STATE_IDLE_DETECT;
2163                 ret = ANEG_TIMER_ENAB;
2164                 break;
2165
2166         case ANEG_STATE_IDLE_DETECT:
2167                 if (ap->ability_match != 0 &&
2168                     ap->rxconfig == 0) {
2169                         ap->state = ANEG_STATE_AN_ENABLE;
2170                         break;
2171                 }
2172                 delta = ap->cur_time - ap->link_time;
2173                 if (delta > ANEG_STATE_SETTLE_TIME) {
2174                         /* XXX another gem from the Broadcom driver :( */
2175                         ap->state = ANEG_STATE_LINK_OK;
2176                 }
2177                 break;
2178
2179         case ANEG_STATE_LINK_OK:
2180                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2181                 ret = ANEG_DONE;
2182                 break;
2183
2184         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2185                 /* ??? unimplemented */
2186                 break;
2187
2188         case ANEG_STATE_NEXT_PAGE_WAIT:
2189                 /* ??? unimplemented */
2190                 break;
2191
2192         default:
2193                 ret = ANEG_FAILED;
2194                 break;
2195         }
2196
2197         return ret;
2198 }
2199
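/* Step the software fiber autoneg state machine for up to ~195ms.
 * Returns 1 on successful negotiation and fills *flags with the MR_*
 * result bits, 0 otherwise.
 */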
2200 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2201 {
2202         int res = 0;
2203         struct tg3_fiber_aneginfo aninfo;
2204         int status = ANEG_FAILED;
2205         unsigned int tick;
2206         u32 tmp;
2207
2208         tw32_f(MAC_TX_AUTO_NEG, 0);
2209
2210         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2211         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2212         udelay(40);
2213
2214         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2215         udelay(40);
2216
2217         memset(&aninfo, 0, sizeof(aninfo));
2218         aninfo.flags |= MR_AN_ENABLE;
2219         aninfo.state = ANEG_STATE_UNKNOWN;
2220         aninfo.cur_time = 0;
2221         tick = 0;
2222         while (++tick < 195000) {
2223                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2224                 if (status == ANEG_DONE || status == ANEG_FAILED)
2225                         break;
2226
2227                 udelay(1);
2228         }
2229
2230         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2231         tw32_f(MAC_MODE, tp->mac_mode);
2232         udelay(40);
2233
2234         *flags = aninfo.flags;
2235
2236         if (status == ANEG_DONE &&
2237             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2238                              MR_LP_ADV_FULL_DUPLEX)))
2239                 res = 1;
2240
2241         return res;
2242 }
2243
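/* Reset and reinitialize the BCM8002 fiber PHY via its vendor
 * specific register sequence.
 */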
2244 static void tg3_init_bcm8002(struct tg3 *tp)
2245 {
2246         u32 mac_status = tr32(MAC_STATUS);
2247         int i;
2248
2249         /* Reset when initializing for the first time or when we have a link. */
2250         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2251             !(mac_status & MAC_STATUS_PCS_SYNCED))
2252                 return;
2253
2254         /* Set PLL lock range. */
2255         tg3_writephy(tp, 0x16, 0x8007);
2256
2257         /* SW reset */
2258         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2259
2260         /* Wait for reset to complete. */
2261         /* XXX schedule_timeout() ... */
2262         for (i = 0; i < 500; i++)
2263                 udelay(10);
2264
2265         /* Config mode; select PMA/Ch 1 regs. */
2266         tg3_writephy(tp, 0x10, 0x8411);
2267
2268         /* Enable auto-lock and comdet, select txclk for tx. */
2269         tg3_writephy(tp, 0x11, 0x0a10);
2270
2271         tg3_writephy(tp, 0x18, 0x00a0);
2272         tg3_writephy(tp, 0x16, 0x41ff);
2273
2274         /* Assert and deassert POR. */
2275         tg3_writephy(tp, 0x13, 0x0400);
2276         udelay(40);
2277         tg3_writephy(tp, 0x13, 0x0000);
2278
2279         tg3_writephy(tp, 0x11, 0x0a50);
2280         udelay(40);
2281         tg3_writephy(tp, 0x11, 0x0a10);
2282
2283         /* Wait for signal to stabilize */
2284         /* XXX schedule_timeout() ... */
2285         for (i = 0; i < 15000; i++)
2286                 udelay(10);
2287
2288         /* Deselect the channel register so we can read the PHYID
2289          * later.
2290          */
2291         tg3_writephy(tp, 0x10, 0x8011);
2292 }
2293
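/* Fiber link setup using the chip's hardware SG_DIG autoneg block.
 * Returns 1 if the link came up, 0 otherwise.
 */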
2294 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2295 {
2296         u32 sg_dig_ctrl, sg_dig_status;
2297         u32 serdes_cfg, expected_sg_dig_ctrl;
2298         int workaround, port_a;
2299         int current_link_up;
2300
2301         serdes_cfg = 0;
2302         expected_sg_dig_ctrl = 0;
2303         workaround = 0;
2304         port_a = 1;
2305         current_link_up = 0;
2306
2307         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2308             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2309                 workaround = 1;
2310                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2311                         port_a = 0;
2312
2313                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2314                 /* preserve bits 20-23 for voltage regulator */
2315                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2316         }
2317
2318         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2319
2320         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2321                 if (sg_dig_ctrl & (1 << 31)) {
2322                         if (workaround) {
2323                                 u32 val = serdes_cfg;
2324
2325                                 if (port_a)
2326                                         val |= 0xc010000;
2327                                 else
2328                                         val |= 0x4010000;
2329                                 tw32_f(MAC_SERDES_CFG, val);
2330                         }
2331                         tw32_f(SG_DIG_CTRL, 0x01388400);
2332                 }
2333                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2334                         tg3_setup_flow_control(tp, 0, 0);
2335                         current_link_up = 1;
2336                 }
2337                 goto out;
2338         }
2339
2340         /* Want auto-negotiation.  */
2341         expected_sg_dig_ctrl = 0x81388400;
2342
2343         /* Pause capability */
2344         expected_sg_dig_ctrl |= (1 << 11);
2345
2346                 /* Asymmetric pause */
2347         expected_sg_dig_ctrl |= (1 << 12);
2348
2349         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2350                 if (workaround)
2351                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2352                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2353                 udelay(5);
2354                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2355
2356                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2357         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2358                                  MAC_STATUS_SIGNAL_DET)) {
2359                 int i;
2360
2361                 /* Give time to negotiate (~200ms) */
2362                 for (i = 0; i < 40000; i++) {
2363                         sg_dig_status = tr32(SG_DIG_STATUS);
2364                         if (sg_dig_status & (0x3))
2365                                 break;
2366                         udelay(5);
2367                 }
2368                 mac_status = tr32(MAC_STATUS);
2369
2370                 if ((sg_dig_status & (1 << 1)) &&
2371                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2372                         u32 local_adv, remote_adv;
2373
2374                         local_adv = ADVERTISE_PAUSE_CAP;
2375                         remote_adv = 0;
2376                         if (sg_dig_status & (1 << 19))
2377                                 remote_adv |= LPA_PAUSE_CAP;
2378                         if (sg_dig_status & (1 << 20))
2379                                 remote_adv |= LPA_PAUSE_ASYM;
2380
2381                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2382                         current_link_up = 1;
2383                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2384                 } else if (!(sg_dig_status & (1 << 1))) {
2385                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2386                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2387                         else {
2388                                 if (workaround) {
2389                                         u32 val = serdes_cfg;
2390
2391                                         if (port_a)
2392                                                 val |= 0xc010000;
2393                                         else
2394                                                 val |= 0x4010000;
2395
2396                                         tw32_f(MAC_SERDES_CFG, val);
2397                                 }
2398
2399                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2400                                 udelay(40);
2401
2402                                 /* Link parallel detection - link is up only
2403                                  * if we have PCS_SYNC and are not receiving
2404                                  * config code words. */
2405                                 mac_status = tr32(MAC_STATUS);
2406                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2407                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2408                                         tg3_setup_flow_control(tp, 0, 0);
2409                                         current_link_up = 1;
2410                                 }
2411                         }
2412                 }
2413         }
2414
2415 out:
2416         return current_link_up;
2417 }
2418
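/* Fiber link setup using the software autoneg state machine, or a
 * forced 1000FD link when autoneg is disabled.  Returns 1 if the
 * link came up, 0 otherwise.
 */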
2419 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2420 {
2421         int current_link_up = 0;
2422
2423         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2424                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2425                 goto out;
2426         }
2427
2428         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2429                 u32 flags;
2430                 int i;
2431
2432                 if (fiber_autoneg(tp, &flags)) {
2433                         u32 local_adv, remote_adv;
2434
2435                         local_adv = ADVERTISE_PAUSE_CAP;
2436                         remote_adv = 0;
2437                         if (flags & MR_LP_ADV_SYM_PAUSE)
2438                                 remote_adv |= LPA_PAUSE_CAP;
2439                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2440                                 remote_adv |= LPA_PAUSE_ASYM;
2441
2442                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2443
2444                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2445                         current_link_up = 1;
2446                 }
2447                 for (i = 0; i < 30; i++) {
2448                         udelay(20);
2449                         tw32_f(MAC_STATUS,
2450                                (MAC_STATUS_SYNC_CHANGED |
2451                                 MAC_STATUS_CFG_CHANGED));
2452                         udelay(40);
2453                         if ((tr32(MAC_STATUS) &
2454                              (MAC_STATUS_SYNC_CHANGED |
2455                               MAC_STATUS_CFG_CHANGED)) == 0)
2456                                 break;
2457                 }
2458
2459                 mac_status = tr32(MAC_STATUS);
2460                 if (current_link_up == 0 &&
2461                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2462                     !(mac_status & MAC_STATUS_RCVD_CFG))
2463                         current_link_up = 1;
2464         } else {
2465                 /* Forcing 1000FD link up. */
2466                 current_link_up = 1;
2467                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2468
2469                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2470                 udelay(40);
2471         }
2472
2473 out:
2474         return current_link_up;
2475 }
2476
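/* Top level link setup for TBI (fiber) interfaces: select hardware
 * or software autoneg, wait for PCS sync, and report any change in
 * link state, speed/duplex or pause configuration.
 */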
2477 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2478 {
2479         u32 orig_pause_cfg;
2480         u16 orig_active_speed;
2481         u8 orig_active_duplex;
2482         u32 mac_status;
2483         int current_link_up;
2484         int i;
2485
2486         orig_pause_cfg =
2487                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2488                                   TG3_FLAG_TX_PAUSE));
2489         orig_active_speed = tp->link_config.active_speed;
2490         orig_active_duplex = tp->link_config.active_duplex;
2491
2492         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2493             netif_carrier_ok(tp->dev) &&
2494             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2495                 mac_status = tr32(MAC_STATUS);
2496                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2497                                MAC_STATUS_SIGNAL_DET |
2498                                MAC_STATUS_CFG_CHANGED |
2499                                MAC_STATUS_RCVD_CFG);
2500                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2501                                    MAC_STATUS_SIGNAL_DET)) {
2502                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2503                                             MAC_STATUS_CFG_CHANGED));
2504                         return 0;
2505                 }
2506         }
2507
2508         tw32_f(MAC_TX_AUTO_NEG, 0);
2509
2510         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2511         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2512         tw32_f(MAC_MODE, tp->mac_mode);
2513         udelay(40);
2514
2515         if (tp->phy_id == PHY_ID_BCM8002)
2516                 tg3_init_bcm8002(tp);
2517
2518         /* Enable link change event even when serdes polling.  */
2519         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2520         udelay(40);
2521
2522         current_link_up = 0;
2523         mac_status = tr32(MAC_STATUS);
2524
2525         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2526                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2527         else
2528                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2529
2530         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2531         tw32_f(MAC_MODE, tp->mac_mode);
2532         udelay(40);
2533
2534         tp->hw_status->status =
2535                 (SD_STATUS_UPDATED |
2536                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2537
2538         for (i = 0; i < 100; i++) {
2539                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2540                                     MAC_STATUS_CFG_CHANGED));
2541                 udelay(5);
2542                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2543                                          MAC_STATUS_CFG_CHANGED)) == 0)
2544                         break;
2545         }
2546
2547         mac_status = tr32(MAC_STATUS);
2548         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2549                 current_link_up = 0;
2550                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2551                         tw32_f(MAC_MODE, (tp->mac_mode |
2552                                           MAC_MODE_SEND_CONFIGS));
2553                         udelay(1);
2554                         tw32_f(MAC_MODE, tp->mac_mode);
2555                 }
2556         }
2557
2558         if (current_link_up == 1) {
2559                 tp->link_config.active_speed = SPEED_1000;
2560                 tp->link_config.active_duplex = DUPLEX_FULL;
2561                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2562                                     LED_CTRL_LNKLED_OVERRIDE |
2563                                     LED_CTRL_1000MBPS_ON));
2564         } else {
2565                 tp->link_config.active_speed = SPEED_INVALID;
2566                 tp->link_config.active_duplex = DUPLEX_INVALID;
2567                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2568                                     LED_CTRL_LNKLED_OVERRIDE |
2569                                     LED_CTRL_TRAFFIC_OVERRIDE));
2570         }
2571
2572         if (current_link_up != netif_carrier_ok(tp->dev)) {
2573                 if (current_link_up)
2574                         netif_carrier_on(tp->dev);
2575                 else
2576                         netif_carrier_off(tp->dev);
2577                 tg3_link_report(tp);
2578         } else {
2579                 u32 now_pause_cfg =
2580                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2581                                          TG3_FLAG_TX_PAUSE);
2582                 if (orig_pause_cfg != now_pause_cfg ||
2583                     orig_active_speed != tp->link_config.active_speed ||
2584                     orig_active_duplex != tp->link_config.active_duplex)
2585                         tg3_link_report(tp);
2586         }
2587
2588         return 0;
2589 }
2590
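/* Link setup for serdes devices with an MII register interface:
 * program the 1000BASE-X advertisements or force the requested
 * duplex, then derive link state and flow control from BMSR/BMCR and
 * the resolved advertisement.
 */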
2591 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2592 {
2593         int current_link_up, err = 0;
2594         u32 bmsr, bmcr;
2595         u16 current_speed;
2596         u8 current_duplex;
2597
2598         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2599         tw32_f(MAC_MODE, tp->mac_mode);
2600         udelay(40);
2601
2602         tw32(MAC_EVENT, 0);
2603
2604         tw32_f(MAC_STATUS,
2605              (MAC_STATUS_SYNC_CHANGED |
2606               MAC_STATUS_CFG_CHANGED |
2607               MAC_STATUS_MI_COMPLETION |
2608               MAC_STATUS_LNKSTATE_CHANGED));
2609         udelay(40);
2610
2611         if (force_reset)
2612                 tg3_phy_reset(tp);
2613
2614         current_link_up = 0;
2615         current_speed = SPEED_INVALID;
2616         current_duplex = DUPLEX_INVALID;
2617
2618         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2619         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2620
2621         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2622
2623         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2624             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2625                 /* do nothing, just check for link up at the end */
2626         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2627                 u32 adv, new_adv;
2628
2629                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2630                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2631                                   ADVERTISE_1000XPAUSE |
2632                                   ADVERTISE_1000XPSE_ASYM |
2633                                   ADVERTISE_SLCT);
2634
2635                 /* Always advertise symmetric PAUSE just like copper */
2636                 new_adv |= ADVERTISE_1000XPAUSE;
2637
2638                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2639                         new_adv |= ADVERTISE_1000XHALF;
2640                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2641                         new_adv |= ADVERTISE_1000XFULL;
2642
2643                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2644                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2645                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2646                         tg3_writephy(tp, MII_BMCR, bmcr);
2647
2648                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2649                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2650                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2651
2652                         return err;
2653                 }
2654         } else {
2655                 u32 new_bmcr;
2656
2657                 bmcr &= ~BMCR_SPEED1000;
2658                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2659
2660                 if (tp->link_config.duplex == DUPLEX_FULL)
2661                         new_bmcr |= BMCR_FULLDPLX;
2662
2663                 if (new_bmcr != bmcr) {
2664                         /* BMCR_SPEED1000 is a reserved bit that needs
2665                          * to be set on write.
2666                          */
2667                         new_bmcr |= BMCR_SPEED1000;
2668
2669                         /* Force a linkdown */
2670                         if (netif_carrier_ok(tp->dev)) {
2671                                 u32 adv;
2672
2673                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2674                                 adv &= ~(ADVERTISE_1000XFULL |
2675                                          ADVERTISE_1000XHALF |
2676                                          ADVERTISE_SLCT);
2677                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2678                                 tg3_writephy(tp, MII_BMCR, bmcr |
2679                                                            BMCR_ANRESTART |
2680                                                            BMCR_ANENABLE);
2681                                 udelay(10);
2682                                 netif_carrier_off(tp->dev);
2683                         }
2684                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2685                         bmcr = new_bmcr;
2686                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2687                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2688                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2689                 }
2690         }
2691
2692         if (bmsr & BMSR_LSTATUS) {
2693                 current_speed = SPEED_1000;
2694                 current_link_up = 1;
2695                 if (bmcr & BMCR_FULLDPLX)
2696                         current_duplex = DUPLEX_FULL;
2697                 else
2698                         current_duplex = DUPLEX_HALF;
2699
2700                 if (bmcr & BMCR_ANENABLE) {
2701                         u32 local_adv, remote_adv, common;
2702
2703                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2704                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2705                         common = local_adv & remote_adv;
2706                         if (common & (ADVERTISE_1000XHALF |
2707                                       ADVERTISE_1000XFULL)) {
2708                                 if (common & ADVERTISE_1000XFULL)
2709                                         current_duplex = DUPLEX_FULL;
2710                                 else
2711                                         current_duplex = DUPLEX_HALF;
2712
2713                                 tg3_setup_flow_control(tp, local_adv,
2714                                                        remote_adv);
2715                         } else {
2716                                 current_link_up = 0;
2717                         }
2718                 }
2719         }
2720
2721         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2722         if (tp->link_config.active_duplex == DUPLEX_HALF)
2723                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2724
2725         tw32_f(MAC_MODE, tp->mac_mode);
2726         udelay(40);
2727
2728         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2729
2730         tp->link_config.active_speed = current_speed;
2731         tp->link_config.active_duplex = current_duplex;
2732
2733         if (current_link_up != netif_carrier_ok(tp->dev)) {
2734                 if (current_link_up)
2735                         netif_carrier_on(tp->dev);
2736                 else {
2737                         netif_carrier_off(tp->dev);
2738                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2739                 }
2740                 tg3_link_report(tp);
2741         }
2742         return err;
2743 }
2744
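/* Periodic helper for MII serdes links: if autoneg has not brought
 * the link up but signal detect is present and no config code words
 * are being received, force 1000FD via parallel detection; when
 * config code words reappear, re-enable autoneg.
 */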
2745 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2746 {
2747         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2748                 /* Give autoneg time to complete. */
2749                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2750                 return;
2751         }
2752         if (!netif_carrier_ok(tp->dev) &&
2753             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2754                 u32 bmcr;
2755
2756                 tg3_readphy(tp, MII_BMCR, &bmcr);
2757                 if (bmcr & BMCR_ANENABLE) {
2758                         u32 phy1, phy2;
2759
2760                         /* Select shadow register 0x1f */
2761                         tg3_writephy(tp, 0x1c, 0x7c00);
2762                         tg3_readphy(tp, 0x1c, &phy1);
2763
2764                         /* Select expansion interrupt status register */
2765                         tg3_writephy(tp, 0x17, 0x0f01);
2766                         tg3_readphy(tp, 0x15, &phy2);
2767                         tg3_readphy(tp, 0x15, &phy2);
2768
2769                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2770                                 /* We have signal detect and are not
2771                                  * receiving config code words, so the
2772                                  * link is up by parallel detection.
2773                                  */
2774
2775                                 bmcr &= ~BMCR_ANENABLE;
2776                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2777                                 tg3_writephy(tp, MII_BMCR, bmcr);
2778                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2779                         }
2780                 }
2781         }
2782         else if (netif_carrier_ok(tp->dev) &&
2783                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2784                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2785                 u32 phy2;
2786
2787                 /* Select expansion interrupt status register */
2788                 tg3_writephy(tp, 0x17, 0x0f01);
2789                 tg3_readphy(tp, 0x15, &phy2);
2790                 if (phy2 & 0x20) {
2791                         u32 bmcr;
2792
2793                         /* Config code words received, turn on autoneg. */
2794                         tg3_readphy(tp, MII_BMCR, &bmcr);
2795                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2796
2797                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2798
2799                 }
2800         }
2801 }
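/* Summary of the parallel-detection handling above: if autoneg is enabled
 * but the link is down, and the PHY reports signal detect without incoming
 * config code words, the link partner is presumed to be a forced 1000/full
 * station, so autoneg is turned off, 1000/full is forced and
 * TG3_FLG2_PARALLEL_DETECT is set.  Once config code words are seen again,
 * autoneg is re-enabled and the flag is cleared.
 */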
2802
2803 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2804 {
2805         int err;
2806
2807         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2808                 err = tg3_setup_fiber_phy(tp, force_reset);
2809         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2810                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2811         } else {
2812                 err = tg3_setup_copper_phy(tp, force_reset);
2813         }
2814
2815         if (tp->link_config.active_speed == SPEED_1000 &&
2816             tp->link_config.active_duplex == DUPLEX_HALF)
2817                 tw32(MAC_TX_LENGTHS,
2818                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2819                       (6 << TX_LENGTHS_IPG_SHIFT) |
2820                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2821         else
2822                 tw32(MAC_TX_LENGTHS,
2823                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2824                       (6 << TX_LENGTHS_IPG_SHIFT) |
2825                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2826
2827         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2828                 if (netif_carrier_ok(tp->dev)) {
2829                         tw32(HOSTCC_STAT_COAL_TICKS,
2830                              tp->coal.stats_block_coalesce_usecs);
2831                 } else {
2832                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2833                 }
2834         }
2835
2836         return err;
2837 }
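/* Note on the MAC_TX_LENGTHS programming above: only the 1000/half case
 * bumps the SLOT_TIME field from 32 to 0xff, presumably to cover the much
 * longer carrier-extended slot time that half-duplex gigabit requires; the
 * IPG fields are identical in both branches.
 */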
2838
2839 /* Tigon3 never reports partial packet sends.  So we do not
2840  * need special logic to handle SKBs that have not had all
2841  * of their frags sent yet, like SunGEM does.
2842  */
2843 static void tg3_tx(struct tg3 *tp)
2844 {
2845         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2846         u32 sw_idx = tp->tx_cons;
2847
2848         while (sw_idx != hw_idx) {
2849                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2850                 struct sk_buff *skb = ri->skb;
2851                 int i;
2852
2853                 if (unlikely(skb == NULL))
2854                         BUG();
2855
2856                 pci_unmap_single(tp->pdev,
2857                                  pci_unmap_addr(ri, mapping),
2858                                  skb_headlen(skb),
2859                                  PCI_DMA_TODEVICE);
2860
2861                 ri->skb = NULL;
2862
2863                 sw_idx = NEXT_TX(sw_idx);
2864
2865                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2866                         if (unlikely(sw_idx == hw_idx))
2867                                 BUG();
2868
2869                         ri = &tp->tx_buffers[sw_idx];
2870                         if (unlikely(ri->skb != NULL))
2871                                 BUG();
2872
2873                         pci_unmap_page(tp->pdev,
2874                                        pci_unmap_addr(ri, mapping),
2875                                        skb_shinfo(skb)->frags[i].size,
2876                                        PCI_DMA_TODEVICE);
2877
2878                         sw_idx = NEXT_TX(sw_idx);
2879                 }
2880
2881                 dev_kfree_skb(skb);
2882         }
2883
2884         tp->tx_cons = sw_idx;
2885
2886         if (unlikely(netif_queue_stopped(tp->dev))) {
2887                 spin_lock(&tp->tx_lock);
2888                 if (netif_queue_stopped(tp->dev) &&
2889                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2890                         netif_wake_queue(tp->dev);
2891                 spin_unlock(&tp->tx_lock);
2892         }
2893 }
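/* The unlocked netif_queue_stopped() test above keeps the common TX
 * completion path free of tx_lock; the re-check under the lock (together
 * with the TG3_TX_WAKEUP_THRESH test) closes the race with tg3_start_xmit()
 * stopping the queue after we sampled it.
 */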
2894
2895 /* Returns size of skb allocated or < 0 on error.
2896  *
2897  * We only need to fill in the address because the other members
2898  * of the RX descriptor are invariant, see tg3_init_rings.
2899  *
2900  * Note the purposeful asymmetry of CPU vs. chip accesses.  For
2901  * posting buffers we only dirty the first cache line of the RX
2902  * descriptor (containing the address).  Whereas for the RX status
2903  * buffers the CPU only reads the last cache line of the RX descriptor
2904  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2905  */
2906 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2907                             int src_idx, u32 dest_idx_unmasked)
2908 {
2909         struct tg3_rx_buffer_desc *desc;
2910         struct ring_info *map, *src_map;
2911         struct sk_buff *skb;
2912         dma_addr_t mapping;
2913         int skb_size, dest_idx;
2914
2915         src_map = NULL;
2916         switch (opaque_key) {
2917         case RXD_OPAQUE_RING_STD:
2918                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2919                 desc = &tp->rx_std[dest_idx];
2920                 map = &tp->rx_std_buffers[dest_idx];
2921                 if (src_idx >= 0)
2922                         src_map = &tp->rx_std_buffers[src_idx];
2923                 skb_size = tp->rx_pkt_buf_sz;
2924                 break;
2925
2926         case RXD_OPAQUE_RING_JUMBO:
2927                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2928                 desc = &tp->rx_jumbo[dest_idx];
2929                 map = &tp->rx_jumbo_buffers[dest_idx];
2930                 if (src_idx >= 0)
2931                         src_map = &tp->rx_jumbo_buffers[src_idx];
2932                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2933                 break;
2934
2935         default:
2936                 return -EINVAL;
2937         };
2938
2939         /* Do not overwrite any of the map or rp information
2940          * until we are sure we can commit to a new buffer.
2941          *
2942          * Callers depend upon this behavior and assume that
2943          * we leave everything unchanged if we fail.
2944          */
2945         skb = dev_alloc_skb(skb_size);
2946         if (skb == NULL)
2947                 return -ENOMEM;
2948
2949         skb->dev = tp->dev;
2950         skb_reserve(skb, tp->rx_offset);
2951
2952         mapping = pci_map_single(tp->pdev, skb->data,
2953                                  skb_size - tp->rx_offset,
2954                                  PCI_DMA_FROMDEVICE);
2955
2956         map->skb = skb;
2957         pci_unmap_addr_set(map, mapping, mapping);
2958
2959         if (src_map != NULL)
2960                 src_map->skb = NULL;
2961
2962         desc->addr_hi = ((u64)mapping >> 32);
2963         desc->addr_lo = ((u64)mapping & 0xffffffff);
2964
2965         return skb_size;
2966 }
2967
2968 /* We only need to move over in the address because the other
2969  * members of the RX descriptor are invariant.  See notes above
2970  * tg3_alloc_rx_skb for full details.
2971  */
2972 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2973                            int src_idx, u32 dest_idx_unmasked)
2974 {
2975         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2976         struct ring_info *src_map, *dest_map;
2977         int dest_idx;
2978
2979         switch (opaque_key) {
2980         case RXD_OPAQUE_RING_STD:
2981                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2982                 dest_desc = &tp->rx_std[dest_idx];
2983                 dest_map = &tp->rx_std_buffers[dest_idx];
2984                 src_desc = &tp->rx_std[src_idx];
2985                 src_map = &tp->rx_std_buffers[src_idx];
2986                 break;
2987
2988         case RXD_OPAQUE_RING_JUMBO:
2989                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2990                 dest_desc = &tp->rx_jumbo[dest_idx];
2991                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2992                 src_desc = &tp->rx_jumbo[src_idx];
2993                 src_map = &tp->rx_jumbo_buffers[src_idx];
2994                 break;
2995
2996         default:
2997                 return;
2998         };
2999
3000         dest_map->skb = src_map->skb;
3001         pci_unmap_addr_set(dest_map, mapping,
3002                            pci_unmap_addr(src_map, mapping));
3003         dest_desc->addr_hi = src_desc->addr_hi;
3004         dest_desc->addr_lo = src_desc->addr_lo;
3005
3006         src_map->skb = NULL;
3007 }
3008
3009 #if TG3_VLAN_TAG_USED
3010 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3011 {
3012         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3013 }
3014 #endif
3015
3016 /* The RX ring scheme is composed of multiple rings which post fresh
3017  * buffers to the chip, and one special ring the chip uses to report
3018  * status back to the host.
3019  *
3020  * The special ring reports the status of received packets to the
3021  * host.  The chip does not write into the original descriptor the
3022  * RX buffer was obtained from.  The chip simply takes the original
3023  * descriptor as provided by the host, updates the status and length
3024  * field, then writes this into the next status ring entry.
3025  *
3026  * Each ring the host uses to post buffers to the chip is described
3027  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3028  * it is first placed into the on-chip RAM.  When the packet's length
3029  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
3030  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3031  * which is within the range of the new packet's length is chosen.
3032  *
3033  * The "separate ring for rx status" scheme may sound queer, but it makes
3034  * sense from a cache coherency perspective.  If only the host writes
3035  * to the buffer post rings, and only the chip writes to the rx status
3036  * rings, then cache lines never move beyond shared-modified state.
3037  * If both the host and chip were to write into the same ring, cache line
3038  * eviction could occur since both entities want it in an exclusive state.
3039  */
3040 static int tg3_rx(struct tg3 *tp, int budget)
3041 {
3042         u32 work_mask;
3043         u32 sw_idx = tp->rx_rcb_ptr;
3044         u16 hw_idx;
3045         int received;
3046
3047         hw_idx = tp->hw_status->idx[0].rx_producer;
3048         /*
3049          * We need to order the read of hw_idx and the read of
3050          * the opaque cookie.
3051          */
3052         rmb();
3053         work_mask = 0;
3054         received = 0;
3055         while (sw_idx != hw_idx && budget > 0) {
3056                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3057                 unsigned int len;
3058                 struct sk_buff *skb;
3059                 dma_addr_t dma_addr;
3060                 u32 opaque_key, desc_idx, *post_ptr;
3061
3062                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3063                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3064                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3065                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3066                                                   mapping);
3067                         skb = tp->rx_std_buffers[desc_idx].skb;
3068                         post_ptr = &tp->rx_std_ptr;
3069                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3070                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3071                                                   mapping);
3072                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3073                         post_ptr = &tp->rx_jumbo_ptr;
3074                 }
3075                 else {
3076                         goto next_pkt_nopost;
3077                 }
3078
3079                 work_mask |= opaque_key;
3080
3081                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3082                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3083                 drop_it:
3084                         tg3_recycle_rx(tp, opaque_key,
3085                                        desc_idx, *post_ptr);
3086                 drop_it_no_recycle:
3087                         /* Other statistics kept track of by card. */
3088                         tp->net_stats.rx_dropped++;
3089                         goto next_pkt;
3090                 }
3091
3092                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3093
3094                 if (len > RX_COPY_THRESHOLD &&
3095                     tp->rx_offset == 2) {
3096                         /* rx_offset != 2 iff this is a 5701 card running
3097                          * in PCI-X mode [see tg3_get_invariants()]
3098                          */
3099                         int skb_size;
3100
3101                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3102                                                     desc_idx, *post_ptr);
3103                         if (skb_size < 0)
3104                                 goto drop_it;
3105
3106                         pci_unmap_single(tp->pdev, dma_addr,
3107                                          skb_size - tp->rx_offset,
3108                                          PCI_DMA_FROMDEVICE);
3109
3110                         skb_put(skb, len);
3111                 } else {
3112                         struct sk_buff *copy_skb;
3113
3114                         tg3_recycle_rx(tp, opaque_key,
3115                                        desc_idx, *post_ptr);
3116
3117                         copy_skb = dev_alloc_skb(len + 2);
3118                         if (copy_skb == NULL)
3119                                 goto drop_it_no_recycle;
3120
3121                         copy_skb->dev = tp->dev;
3122                         skb_reserve(copy_skb, 2);
3123                         skb_put(copy_skb, len);
3124                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3125                         memcpy(copy_skb->data, skb->data, len);
3126                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3127
3128                         /* We'll reuse the original ring buffer. */
3129                         skb = copy_skb;
3130                 }
3131
3132                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3133                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3134                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3135                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3136                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3137                 else
3138                         skb->ip_summed = CHECKSUM_NONE;
3139
3140                 skb->protocol = eth_type_trans(skb, tp->dev);
3141 #if TG3_VLAN_TAG_USED
3142                 if (tp->vlgrp != NULL &&
3143                     desc->type_flags & RXD_FLAG_VLAN) {
3144                         tg3_vlan_rx(tp, skb,
3145                                     desc->err_vlan & RXD_VLAN_MASK);
3146                 } else
3147 #endif
3148                         netif_receive_skb(skb);
3149
3150                 tp->dev->last_rx = jiffies;
3151                 received++;
3152                 budget--;
3153
3154 next_pkt:
3155                 (*post_ptr)++;
3156 next_pkt_nopost:
3157                 sw_idx++;
3158                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3159
3160                 /* Refresh hw_idx to see if there is new work */
3161                 if (sw_idx == hw_idx) {
3162                         hw_idx = tp->hw_status->idx[0].rx_producer;
3163                         rmb();
3164                 }
3165         }
3166
3167         /* ACK the status ring. */
3168         tp->rx_rcb_ptr = sw_idx;
3169         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3170
3171         /* Refill RX ring(s). */
3172         if (work_mask & RXD_OPAQUE_RING_STD) {
3173                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3174                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3175                              sw_idx);
3176         }
3177         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3178                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3179                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3180                              sw_idx);
3181         }
3182         mmiowb();
3183
3184         return received;
3185 }
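/* RX fast-path summary: frames longer than RX_COPY_THRESHOLD (when
 * rx_offset == 2, i.e. on everything but a 5701 in PCI-X mode) are handed
 * up in the original ring buffer and a replacement skb is posted via
 * tg3_alloc_rx_skb(); everything else is copied into a small fresh skb and
 * the original buffer is recycled in place.  Only the producer mailboxes of
 * the rings that actually supplied buffers (tracked in work_mask) are
 * rewritten at the end.
 */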
3186
3187 static int tg3_poll(struct net_device *netdev, int *budget)
3188 {
3189         struct tg3 *tp = netdev_priv(netdev);
3190         struct tg3_hw_status *sblk = tp->hw_status;
3191         int done;
3192
3193         /* handle link change and other phy events */
3194         if (!(tp->tg3_flags &
3195               (TG3_FLAG_USE_LINKCHG_REG |
3196                TG3_FLAG_POLL_SERDES))) {
3197                 if (sblk->status & SD_STATUS_LINK_CHG) {
3198                         sblk->status = SD_STATUS_UPDATED |
3199                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3200                         spin_lock(&tp->lock);
3201                         tg3_setup_phy(tp, 0);
3202                         spin_unlock(&tp->lock);
3203                 }
3204         }
3205
3206         /* run TX completion thread */
3207         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3208                 tg3_tx(tp);
3209         }
3210
3211         /* run RX thread, within the bounds set by NAPI.
3212          * All RX "locking" is done by ensuring outside
3213          * code synchronizes with dev->poll()
3214          */
3215         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3216                 int orig_budget = *budget;
3217                 int work_done;
3218
3219                 if (orig_budget > netdev->quota)
3220                         orig_budget = netdev->quota;
3221
3222                 work_done = tg3_rx(tp, orig_budget);
3223
3224                 *budget -= work_done;
3225                 netdev->quota -= work_done;
3226         }
3227
3228         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3229                 tp->last_tag = sblk->status_tag;
3230                 rmb();
3231         } else
3232                 sblk->status &= ~SD_STATUS_UPDATED;
3233
3234         /* if no more work, tell net stack and NIC we're done */
3235         done = !tg3_has_work(tp);
3236         if (done) {
3237                 netif_rx_complete(netdev);
3238                 tg3_restart_ints(tp);
3239         }
3240
3241         return (done ? 0 : 1);
3242 }
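/* tg3_poll() only completes NAPI polling (netif_rx_complete() plus
 * tg3_restart_ints()) once tg3_has_work() reports no pending link, TX or
 * RX work; returning 1 leaves the device on the poll list so the core
 * calls us again.
 */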
3243
3244 static void tg3_irq_quiesce(struct tg3 *tp)
3245 {
3246         BUG_ON(tp->irq_sync);
3247
3248         tp->irq_sync = 1;
3249         smp_mb();
3250
3251         synchronize_irq(tp->pdev->irq);
3252 }
3253
3254 static inline int tg3_irq_sync(struct tg3 *tp)
3255 {
3256         return tp->irq_sync;
3257 }
3258
3259 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3260  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3261  * with as well.  Most of the time, this is not necessary except when
3262  * shutting down the device.
3263  */
3264 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3265 {
3266         if (irq_sync)
3267                 tg3_irq_quiesce(tp);
3268         spin_lock_bh(&tp->lock);
3269         spin_lock(&tp->tx_lock);
3270 }
3271
3272 static inline void tg3_full_unlock(struct tg3 *tp)
3273 {
3274         spin_unlock(&tp->tx_lock);
3275         spin_unlock_bh(&tp->lock);
3276 }
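/* Lock ordering is tp->lock first, then tp->tx_lock, released in reverse.
 * Callers that reprogram the chip follow the pattern used by
 * tg3_reset_task() and tg3_change_mtu() below:
 *
 *      tg3_netif_stop(tp);
 *      tg3_full_lock(tp, 1);   (also quiesces the IRQ handler)
 *      ... halt and re-init the hardware ...
 *      tg3_netif_start(tp);
 *      tg3_full_unlock(tp);
 */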
3277
3278 /* MSI ISR - No need to check for interrupt sharing and no need to
3279  * flush status block and interrupt mailbox. PCI ordering rules
3280  * guarantee that MSI will arrive after the status block.
3281  */
3282 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3283 {
3284         struct net_device *dev = dev_id;
3285         struct tg3 *tp = netdev_priv(dev);
3286
3287         prefetch(tp->hw_status);
3288         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3289         /*
3290          * Writing any value to intr-mbox-0 clears PCI INTA# and
3291          * chip-internal interrupt pending events.
3292          * Writing non-zero to intr-mbox-0 additionally tells the
3293          * NIC to stop sending us irqs, engaging "in-intr-handler"
3294          * event coalescing.
3295          */
3296         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3297         if (likely(!tg3_irq_sync(tp)))
3298                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3299
3300         return IRQ_RETVAL(1);
3301 }
3302
3303 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3304 {
3305         struct net_device *dev = dev_id;
3306         struct tg3 *tp = netdev_priv(dev);
3307         struct tg3_hw_status *sblk = tp->hw_status;
3308         unsigned int handled = 1;
3309
3310         /* In INTx mode, it is possible for the interrupt to arrive at
3311          * the CPU before the status block posted prior to the interrupt is visible.
3312          * Reading the PCI State register will confirm whether the
3313          * interrupt is ours and will flush the status block.
3314          */
3315         if ((sblk->status & SD_STATUS_UPDATED) ||
3316             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3317                 /*
3318                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3319                  * chip-internal interrupt pending events.
3320                  * Writing non-zero to intr-mbox-0 additionally tells the
3321                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3322                  * event coalescing.
3323                  */
3324                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3325                              0x00000001);
3326                 if (tg3_irq_sync(tp))
3327                         goto out;
3328                 sblk->status &= ~SD_STATUS_UPDATED;
3329                 if (likely(tg3_has_work(tp))) {
3330                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3331                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3332                 } else {
3333                         /* No work, shared interrupt perhaps?  re-enable
3334                          * interrupts, and flush that PCI write
3335                          */
3336                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3337                                 0x00000000);
3338                 }
3339         } else {        /* shared interrupt */
3340                 handled = 0;
3341         }
3342 out:
3343         return IRQ_RETVAL(handled);
3344 }
3345
3346 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3347 {
3348         struct net_device *dev = dev_id;
3349         struct tg3 *tp = netdev_priv(dev);
3350         struct tg3_hw_status *sblk = tp->hw_status;
3351         unsigned int handled = 1;
3352
3353         /* In INTx mode, it is possible for the interrupt to arrive at
3354          * the CPU before the status block posted prior to the interrupt is visible.
3355          * Reading the PCI State register will confirm whether the
3356          * interrupt is ours and will flush the status block.
3357          */
3358         if ((sblk->status_tag != tp->last_tag) ||
3359             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3360                 /*
3361                  * writing any value to intr-mbox-0 clears PCI INTA# and
3362                  * chip-internal interrupt pending events.
3363                  * writing non-zero to intr-mbox-0 additionally tells the
3364                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3365                  * event coalescing.
3366                  */
3367                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3368                              0x00000001);
3369                 if (tg3_irq_sync(tp))
3370                         goto out;
3371                 if (netif_rx_schedule_prep(dev)) {
3372                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3373                         /* Update last_tag to mark that this status has been
3374                          * seen. Because interrupt may be shared, we may be
3375                          * racing with tg3_poll(), so only update last_tag
3376                          * if tg3_poll() is not scheduled.
3377                          */
3378                         tp->last_tag = sblk->status_tag;
3379                         __netif_rx_schedule(dev);
3380                 }
3381         } else {        /* shared interrupt */
3382                 handled = 0;
3383         }
3384 out:
3385         return IRQ_RETVAL(handled);
3386 }
3387
3388 /* ISR for interrupt test */
3389 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3390                 struct pt_regs *regs)
3391 {
3392         struct net_device *dev = dev_id;
3393         struct tg3 *tp = netdev_priv(dev);
3394         struct tg3_hw_status *sblk = tp->hw_status;
3395
3396         if ((sblk->status & SD_STATUS_UPDATED) ||
3397             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3398                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3399                              0x00000001);
3400                 return IRQ_RETVAL(1);
3401         }
3402         return IRQ_RETVAL(0);
3403 }
3404
3405 static int tg3_init_hw(struct tg3 *);
3406 static int tg3_halt(struct tg3 *, int, int);
3407
3408 #ifdef CONFIG_NET_POLL_CONTROLLER
3409 static void tg3_poll_controller(struct net_device *dev)
3410 {
3411         struct tg3 *tp = netdev_priv(dev);
3412
3413         tg3_interrupt(tp->pdev->irq, dev, NULL);
3414 }
3415 #endif
3416
3417 static void tg3_reset_task(void *_data)
3418 {
3419         struct tg3 *tp = _data;
3420         unsigned int restart_timer;
3421
3422         tg3_netif_stop(tp);
3423
3424         tg3_full_lock(tp, 1);
3425
3426         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3427         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3428
3429         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3430         tg3_init_hw(tp);
3431
3432         tg3_netif_start(tp);
3433
3434         tg3_full_unlock(tp);
3435
3436         if (restart_timer)
3437                 mod_timer(&tp->timer, jiffies + 1);
3438 }
3439
3440 static void tg3_tx_timeout(struct net_device *dev)
3441 {
3442         struct tg3 *tp = netdev_priv(dev);
3443
3444         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3445                dev->name);
3446
3447         schedule_work(&tp->reset_task);
3448 }
3449
3450 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3451 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3452 {
3453         u32 base = (u32) mapping & 0xffffffff;
3454
3455         return ((base > 0xffffdcc0) &&
3456                 (base + len + 8 < base));
3457 }
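/* Worked example: mapping = 0x1ffffff00, len = 0x200.  base = 0xffffff00
 * is above 0xffffdcc0, and base + len + 8 wraps around to 0x108 < base,
 * so the buffer would straddle the 8GB line and the hwbug workaround
 * below must rewrite it.
 */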
3458
3459 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3460
3461 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3462                                        u32 last_plus_one, u32 *start,
3463                                        u32 base_flags, u32 mss)
3464 {
3465         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3466         dma_addr_t new_addr = 0;
3467         u32 entry = *start;
3468         int i, ret = 0;
3469
3470         if (!new_skb) {
3471                 ret = -1;
3472         } else {
3473                 /* New SKB is guaranteed to be linear. */
3474                 entry = *start;
3475                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3476                                           PCI_DMA_TODEVICE);
3477                 /* Make sure new skb does not cross any 4G boundaries.
3478                  * Drop the packet if it does.
3479                  */
3480                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3481                         ret = -1;
3482                         dev_kfree_skb(new_skb);
3483                         new_skb = NULL;
3484                 } else {
3485                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3486                                     base_flags, 1 | (mss << 1));
3487                         *start = NEXT_TX(entry);
3488                 }
3489         }
3490
3491         /* Now clean up the sw ring entries. */
3492         i = 0;
3493         while (entry != last_plus_one) {
3494                 int len;
3495
3496                 if (i == 0)
3497                         len = skb_headlen(skb);
3498                 else
3499                         len = skb_shinfo(skb)->frags[i-1].size;
3500                 pci_unmap_single(tp->pdev,
3501                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3502                                  len, PCI_DMA_TODEVICE);
3503                 if (i == 0) {
3504                         tp->tx_buffers[entry].skb = new_skb;
3505                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3506                 } else {
3507                         tp->tx_buffers[entry].skb = NULL;
3508                 }
3509                 entry = NEXT_TX(entry);
3510                 i++;
3511         }
3512
3513         dev_kfree_skb(skb);
3514
3515         return ret;
3516 }
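/* The workaround above linearizes the packet: skb_copy() produces a fresh
 * linear skb that is mapped as a single descriptor, and every descriptor
 * already built for the original skb is unmapped and released.  If the copy
 * cannot be allocated, or its mapping also trips the 4GB test, the packet
 * is simply dropped and the caller treats it as sent.
 */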
3517
3518 static void tg3_set_txd(struct tg3 *tp, int entry,
3519                         dma_addr_t mapping, int len, u32 flags,
3520                         u32 mss_and_is_end)
3521 {
3522         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3523         int is_end = (mss_and_is_end & 0x1);
3524         u32 mss = (mss_and_is_end >> 1);
3525         u32 vlan_tag = 0;
3526
3527         if (is_end)
3528                 flags |= TXD_FLAG_END;
3529         if (flags & TXD_FLAG_VLAN) {
3530                 vlan_tag = flags >> 16;
3531                 flags &= 0xffff;
3532         }
3533         vlan_tag |= (mss << TXD_MSS_SHIFT);
3534
3535         txd->addr_hi = ((u64) mapping >> 32);
3536         txd->addr_lo = ((u64) mapping & 0xffffffff);
3537         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3538         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3539 }
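/* Descriptor packing example: the final fragment of a TSO packet with an
 * MSS of 1460 is queued by the callers below as
 *
 *      tg3_set_txd(tp, entry, mapping, len, base_flags, 1 | (1460 << 1));
 *
 * bit 0 becomes TXD_FLAG_END, the remaining bits land in the MSS portion of
 * the descriptor's vlan_tag word, and any VLAN tag rides in the upper 16
 * bits of base_flags.
 */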
3540
3541 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3542 {
3543         struct tg3 *tp = netdev_priv(dev);
3544         dma_addr_t mapping;
3545         u32 len, entry, base_flags, mss;
3546         int would_hit_hwbug;
3547
3548         len = skb_headlen(skb);
3549
3550         /* No BH disabling for tx_lock here.  We are running in BH disabled
3551          * context and TX reclaim runs via tp->poll inside of a software
3552          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3553          * no IRQ context deadlocks to worry about either.  Rejoice!
3554          */
3555         if (!spin_trylock(&tp->tx_lock))
3556                 return NETDEV_TX_LOCKED; 
3557
3558         /* This is a hard error, log it. */
3559         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3560                 netif_stop_queue(dev);
3561                 spin_unlock(&tp->tx_lock);
3562                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3563                        dev->name);
3564                 return NETDEV_TX_BUSY;
3565         }
3566
3567         entry = tp->tx_prod;
3568         base_flags = 0;
3569         if (skb->ip_summed == CHECKSUM_HW)
3570                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3571 #if TG3_TSO_SUPPORT != 0
3572         mss = 0;
3573         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3574             (mss = skb_shinfo(skb)->tso_size) != 0) {
3575                 int tcp_opt_len, ip_tcp_len;
3576
3577                 if (skb_header_cloned(skb) &&
3578                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3579                         dev_kfree_skb(skb);
3580                         goto out_unlock;
3581                 }
3582
3583                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3584                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3585
3586                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3587                                TXD_FLAG_CPU_POST_DMA);
3588
3589                 skb->nh.iph->check = 0;
3590                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3591                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3592                         skb->h.th->check = 0;
3593                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3594                 }
3595                 else {
3596                         skb->h.th->check =
3597                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3598                                                    skb->nh.iph->daddr,
3599                                                    0, IPPROTO_TCP, 0);
3600                 }
3601
3602                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3603                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3604                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3605                                 int tsflags;
3606
3607                                 tsflags = ((skb->nh.iph->ihl - 5) +
3608                                            (tcp_opt_len >> 2));
3609                                 mss |= (tsflags << 11);
3610                         }
3611                 } else {
3612                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3613                                 int tsflags;
3614
3615                                 tsflags = ((skb->nh.iph->ihl - 5) +
3616                                            (tcp_opt_len >> 2));
3617                                 base_flags |= tsflags << 12;
3618                         }
3619                 }
3620         }
3621 #else
3622         mss = 0;
3623 #endif
3624 #if TG3_VLAN_TAG_USED
3625         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3626                 base_flags |= (TXD_FLAG_VLAN |
3627                                (vlan_tx_tag_get(skb) << 16));
3628 #endif
3629
3630         /* Queue skb data, a.k.a. the main skb fragment. */
3631         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3632
3633         tp->tx_buffers[entry].skb = skb;
3634         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3635
3636         would_hit_hwbug = 0;
3637
3638         if (tg3_4g_overflow_test(mapping, len))
3639                 would_hit_hwbug = 1;
3640
3641         tg3_set_txd(tp, entry, mapping, len, base_flags,
3642                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3643
3644         entry = NEXT_TX(entry);
3645
3646         /* Now loop through additional data fragments, and queue them. */
3647         if (skb_shinfo(skb)->nr_frags > 0) {
3648                 unsigned int i, last;
3649
3650                 last = skb_shinfo(skb)->nr_frags - 1;
3651                 for (i = 0; i <= last; i++) {
3652                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3653
3654                         len = frag->size;
3655                         mapping = pci_map_page(tp->pdev,
3656                                                frag->page,
3657                                                frag->page_offset,
3658                                                len, PCI_DMA_TODEVICE);
3659
3660                         tp->tx_buffers[entry].skb = NULL;
3661                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3662
3663                         if (tg3_4g_overflow_test(mapping, len))
3664                                 would_hit_hwbug = 1;
3665
3666                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3667                                 tg3_set_txd(tp, entry, mapping, len,
3668                                             base_flags, (i == last)|(mss << 1));
3669                         else
3670                                 tg3_set_txd(tp, entry, mapping, len,
3671                                             base_flags, (i == last));
3672
3673                         entry = NEXT_TX(entry);
3674                 }
3675         }
3676
3677         if (would_hit_hwbug) {
3678                 u32 last_plus_one = entry;
3679                 u32 start;
3680
3681                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3682                 start &= (TG3_TX_RING_SIZE - 1);
3683
3684                 /* If the workaround fails due to memory/mapping
3685                  * failure, silently drop this packet.
3686                  */
3687                 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3688                                                 &start, base_flags, mss))
3689                         goto out_unlock;
3690
3691                 entry = start;
3692         }
3693
3694         /* Packets are ready, update Tx producer idx local and on card. */
3695         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3696
3697         tp->tx_prod = entry;
3698         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3699                 netif_stop_queue(dev);
3700                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3701                         netif_wake_queue(tp->dev);
3702         }
3703
3704 out_unlock:
3705         mmiowb();
3706         spin_unlock(&tp->tx_lock);
3707
3708         dev->trans_start = jiffies;
3709
3710         return NETDEV_TX_OK;
3711 }
3712
3713 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3714                                int new_mtu)
3715 {
3716         dev->mtu = new_mtu;
3717
3718         if (new_mtu > ETH_DATA_LEN) {
3719                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3720                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3721                         ethtool_op_set_tso(dev, 0);
3722                 }
3723                 else
3724                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3725         } else {
3726                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3727                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3728                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3729         }
3730 }
3731
3732 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3733 {
3734         struct tg3 *tp = netdev_priv(dev);
3735
3736         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3737                 return -EINVAL;
3738
3739         if (!netif_running(dev)) {
3740                 /* We'll just catch it later when the
3741                  * device is brought up.
3742                  */
3743                 tg3_set_mtu(dev, tp, new_mtu);
3744                 return 0;
3745         }
3746
3747         tg3_netif_stop(tp);
3748
3749         tg3_full_lock(tp, 1);
3750
3751         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3752
3753         tg3_set_mtu(dev, tp, new_mtu);
3754
3755         tg3_init_hw(tp);
3756
3757         tg3_netif_start(tp);
3758
3759         tg3_full_unlock(tp);
3760
3761         return 0;
3762 }
3763
3764 /* Free up pending packets in all rx/tx rings.
3765  *
3766  * The chip has been shut down and the driver detached from
3767  * the network stack, so no interrupts or new tx packets will
3768  * end up in the driver.  tp->{tx,}lock is not held and we are not
3769  * in an interrupt context and thus may sleep.
3770  */
3771 static void tg3_free_rings(struct tg3 *tp)
3772 {
3773         struct ring_info *rxp;
3774         int i;
3775
3776         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3777                 rxp = &tp->rx_std_buffers[i];
3778
3779                 if (rxp->skb == NULL)
3780                         continue;
3781                 pci_unmap_single(tp->pdev,
3782                                  pci_unmap_addr(rxp, mapping),
3783                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3784                                  PCI_DMA_FROMDEVICE);
3785                 dev_kfree_skb_any(rxp->skb);
3786                 rxp->skb = NULL;
3787         }
3788
3789         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3790                 rxp = &tp->rx_jumbo_buffers[i];
3791
3792                 if (rxp->skb == NULL)
3793                         continue;
3794                 pci_unmap_single(tp->pdev,
3795                                  pci_unmap_addr(rxp, mapping),
3796                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3797                                  PCI_DMA_FROMDEVICE);
3798                 dev_kfree_skb_any(rxp->skb);
3799                 rxp->skb = NULL;
3800         }
3801
3802         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3803                 struct tx_ring_info *txp;
3804                 struct sk_buff *skb;
3805                 int j;
3806
3807                 txp = &tp->tx_buffers[i];
3808                 skb = txp->skb;
3809
3810                 if (skb == NULL) {
3811                         i++;
3812                         continue;
3813                 }
3814
3815                 pci_unmap_single(tp->pdev,
3816                                  pci_unmap_addr(txp, mapping),
3817                                  skb_headlen(skb),
3818                                  PCI_DMA_TODEVICE);
3819                 txp->skb = NULL;
3820
3821                 i++;
3822
3823                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3824                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3825                         pci_unmap_page(tp->pdev,
3826                                        pci_unmap_addr(txp, mapping),
3827                                        skb_shinfo(skb)->frags[j].size,
3828                                        PCI_DMA_TODEVICE);
3829                         i++;
3830                 }
3831
3832                 dev_kfree_skb_any(skb);
3833         }
3834 }
3835
3836 /* Initialize tx/rx rings for packet processing.
3837  *
3838  * The chip has been shut down and the driver detached from
3839  * the network stack, so no interrupts or new tx packets will
3840  * end up in the driver.  tp->{tx,}lock are held and thus
3841  * we may not sleep.
3842  */
3843 static void tg3_init_rings(struct tg3 *tp)
3844 {
3845         u32 i;
3846
3847         /* Free up all the SKBs. */
3848         tg3_free_rings(tp);
3849
3850         /* Zero out all descriptors. */
3851         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3852         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3853         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3854         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3855
3856         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3857         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3858             (tp->dev->mtu > ETH_DATA_LEN))
3859                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3860
3861         /* Initialize the invariant fields of the rings; we only set this
3862          * stuff once.  This works because the card does not
3863          * write into the rx buffer posting rings.
3864          */
3865         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3866                 struct tg3_rx_buffer_desc *rxd;
3867
3868                 rxd = &tp->rx_std[i];
3869                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3870                         << RXD_LEN_SHIFT;
3871                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3872                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3873                                (i << RXD_OPAQUE_INDEX_SHIFT));
3874         }
3875
3876         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3877                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3878                         struct tg3_rx_buffer_desc *rxd;
3879
3880                         rxd = &tp->rx_jumbo[i];
3881                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3882                                 << RXD_LEN_SHIFT;
3883                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3884                                 RXD_FLAG_JUMBO;
3885                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3886                                (i << RXD_OPAQUE_INDEX_SHIFT));
3887                 }
3888         }
3889
3890         /* Now allocate fresh SKBs for each rx ring. */
3891         for (i = 0; i < tp->rx_pending; i++) {
3892                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3893                                      -1, i) < 0)
3894                         break;
3895         }
3896
3897         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3898                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3899                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3900                                              -1, i) < 0)
3901                                 break;
3902                 }
3903         }
3904 }
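/* Only the first tp->rx_pending standard entries (and tp->rx_jumbo_pending
 * jumbo entries) get buffers here.  The invariant idx_len/type_flags/opaque
 * fields of every descriptor were written in the loops above, so later
 * refills from tg3_rx() only have to supply a new DMA address.
 */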
3905
3906 /*
3907  * Must not be invoked with interrupt sources disabled and
3908  * the hardware shut down.
3909  */
3910 static void tg3_free_consistent(struct tg3 *tp)
3911 {
3912         if (tp->rx_std_buffers) {
3913                 kfree(tp->rx_std_buffers);
3914                 tp->rx_std_buffers = NULL;
3915         }
3916         if (tp->rx_std) {
3917                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3918                                     tp->rx_std, tp->rx_std_mapping);
3919                 tp->rx_std = NULL;
3920         }
3921         if (tp->rx_jumbo) {
3922                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3923                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3924                 tp->rx_jumbo = NULL;
3925         }
3926         if (tp->rx_rcb) {
3927                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3928                                     tp->rx_rcb, tp->rx_rcb_mapping);
3929                 tp->rx_rcb = NULL;
3930         }
3931         if (tp->tx_ring) {
3932                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3933                         tp->tx_ring, tp->tx_desc_mapping);
3934                 tp->tx_ring = NULL;
3935         }
3936         if (tp->hw_status) {
3937                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3938                                     tp->hw_status, tp->status_mapping);
3939                 tp->hw_status = NULL;
3940         }
3941         if (tp->hw_stats) {
3942                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3943                                     tp->hw_stats, tp->stats_mapping);
3944                 tp->hw_stats = NULL;
3945         }
3946 }
3947
3948 /*
3949  * Must not be invoked with interrupt sources disabled and
3950  * the hardware shut down.  Can sleep.
3951  */
3952 static int tg3_alloc_consistent(struct tg3 *tp)
3953 {
3954         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3955                                       (TG3_RX_RING_SIZE +
3956                                        TG3_RX_JUMBO_RING_SIZE)) +
3957                                      (sizeof(struct tx_ring_info) *
3958                                       TG3_TX_RING_SIZE),
3959                                      GFP_KERNEL);
3960         if (!tp->rx_std_buffers)
3961                 return -ENOMEM;
3962
3963         memset(tp->rx_std_buffers, 0,
3964                (sizeof(struct ring_info) *
3965                 (TG3_RX_RING_SIZE +
3966                  TG3_RX_JUMBO_RING_SIZE)) +
3967                (sizeof(struct tx_ring_info) *
3968                 TG3_TX_RING_SIZE));
3969
3970         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3971         tp->tx_buffers = (struct tx_ring_info *)
3972                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3973
3974         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3975                                           &tp->rx_std_mapping);
3976         if (!tp->rx_std)
3977                 goto err_out;
3978
3979         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3980                                             &tp->rx_jumbo_mapping);
3981
3982         if (!tp->rx_jumbo)
3983                 goto err_out;
3984
3985         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3986                                           &tp->rx_rcb_mapping);
3987         if (!tp->rx_rcb)
3988                 goto err_out;
3989
3990         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3991                                            &tp->tx_desc_mapping);
3992         if (!tp->tx_ring)
3993                 goto err_out;
3994
3995         tp->hw_status = pci_alloc_consistent(tp->pdev,
3996                                              TG3_HW_STATUS_SIZE,
3997                                              &tp->status_mapping);
3998         if (!tp->hw_status)
3999                 goto err_out;
4000
4001         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4002                                             sizeof(struct tg3_hw_stats),
4003                                             &tp->stats_mapping);
4004         if (!tp->hw_stats)
4005                 goto err_out;
4006
4007         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4008         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4009
4010         return 0;
4011
4012 err_out:
4013         tg3_free_consistent(tp);
4014         return -ENOMEM;
4015 }
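/* A single kmalloc() covers all of the software bookkeeping (the standard
 * RX, jumbo RX and TX ring_info arrays are carved out of one block), while
 * everything the chip itself DMAs to or from -- the buffer rings, the RX
 * return ring, the status block and the statistics block -- gets its own
 * pci_alloc_consistent() area.
 */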
4016
4017 #define MAX_WAIT_CNT 1000
4018
4019 /* To stop a block, clear the enable bit and poll till it
4020  * clears.  tp->lock is held.
4021  */
4022 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4023 {
4024         unsigned int i;
4025         u32 val;
4026
4027         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4028                 switch (ofs) {
4029                 case RCVLSC_MODE:
4030                 case DMAC_MODE:
4031                 case MBFREE_MODE:
4032                 case BUFMGR_MODE:
4033                 case MEMARB_MODE:
4034                         /* We can't enable/disable these bits of the
4035                          * 5705/5750, so just say success.
4036                          */
4037                         return 0;
4038
4039                 default:
4040                         break;
4041                 };
4042         }
4043
4044         val = tr32(ofs);
4045         val &= ~enable_bit;
4046         tw32_f(ofs, val);
4047
4048         for (i = 0; i < MAX_WAIT_CNT; i++) {
4049                 udelay(100);
4050                 val = tr32(ofs);
4051                 if ((val & enable_bit) == 0)
4052                         break;
4053         }
4054
4055         if (i == MAX_WAIT_CNT && !silent) {
4056                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4057                        "ofs=%lx enable_bit=%x\n",
4058                        ofs, enable_bit);
4059                 return -ENODEV;
4060         }
4061
4062         return 0;
4063 }
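/* The loop above polls every 100us for up to MAX_WAIT_CNT iterations, so a
 * block gets roughly 100ms to shut down before -ENODEV is returned (the
 * timeout is swallowed entirely when "silent" is set).
 */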
4064
4065 /* tp->lock is held. */
4066 static int tg3_abort_hw(struct tg3 *tp, int silent)
4067 {
4068         int i, err;
4069
4070         tg3_disable_ints(tp);
4071
4072         tp->rx_mode &= ~RX_MODE_ENABLE;
4073         tw32_f(MAC_RX_MODE, tp->rx_mode);
4074         udelay(10);
4075
4076         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4077         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4078         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4079         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4080         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4081         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4082
4083         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4084         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4085         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4086         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4087         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4088         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4089         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4090
4091         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4092         tw32_f(MAC_MODE, tp->mac_mode);
4093         udelay(40);
4094
4095         tp->tx_mode &= ~TX_MODE_ENABLE;
4096         tw32_f(MAC_TX_MODE, tp->tx_mode);
4097
4098         for (i = 0; i < MAX_WAIT_CNT; i++) {
4099                 udelay(100);
4100                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4101                         break;
4102         }
4103         if (i >= MAX_WAIT_CNT) {
4104                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4105                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4106                        tp->dev->name, tr32(MAC_TX_MODE));
4107                 err |= -ENODEV;
4108         }
4109
4110         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4111         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4112         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4113
4114         tw32(FTQ_RESET, 0xffffffff);
4115         tw32(FTQ_RESET, 0x00000000);
4116
4117         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4118         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4119
4120         if (tp->hw_status)
4121                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4122         if (tp->hw_stats)
4123                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4124
4125         return err;
4126 }
4127
4128 /* tp->lock is held. */
4129 static int tg3_nvram_lock(struct tg3 *tp)
4130 {
4131         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4132                 int i;
4133
4134                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4135                 for (i = 0; i < 8000; i++) {
4136                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4137                                 break;
4138                         udelay(20);
4139                 }
4140                 if (i == 8000)
4141                         return -ENODEV;
4142         }
4143         return 0;
4144 }
4145
4146 /* tp->lock is held. */
4147 static void tg3_nvram_unlock(struct tg3 *tp)
4148 {
4149         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4150                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4151 }
4152
4153 /* tp->lock is held. */
4154 static void tg3_enable_nvram_access(struct tg3 *tp)
4155 {
4156         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4157             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4158                 u32 nvaccess = tr32(NVRAM_ACCESS);
4159
4160                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4161         }
4162 }
4163
4164 /* tp->lock is held. */
4165 static void tg3_disable_nvram_access(struct tg3 *tp)
4166 {
4167         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4168             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4169                 u32 nvaccess = tr32(NVRAM_ACCESS);
4170
4171                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4172         }
4173 }
4174
4175 /* tp->lock is held. */
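     /* Tell the bootcode that a driver-initiated reset is coming: post the
      * magic value to the firmware mailbox (skipped on Sun 570X boards) and,
      * when the newer ASF handshake is in use, record the reason (start,
      * unload or suspend) in the driver-state mailbox.  The post-reset and
      * legacy-ASF counterparts follow below.
      */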
4176 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4177 {
4178         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4179                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4180                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4181
4182         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4183                 switch (kind) {
4184                 case RESET_KIND_INIT:
4185                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4186                                       DRV_STATE_START);
4187                         break;
4188
4189                 case RESET_KIND_SHUTDOWN:
4190                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4191                                       DRV_STATE_UNLOAD);
4192                         break;
4193
4194                 case RESET_KIND_SUSPEND:
4195                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4196                                       DRV_STATE_SUSPEND);
4197                         break;
4198
4199                 default:
4200                         break;
4201                 }
4202         }
4203 }
4204
4205 /* tp->lock is held. */
4206 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4207 {
4208         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4209                 switch (kind) {
4210                 case RESET_KIND_INIT:
4211                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4212                                       DRV_STATE_START_DONE);
4213                         break;
4214
4215                 case RESET_KIND_SHUTDOWN:
4216                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4217                                       DRV_STATE_UNLOAD_DONE);
4218                         break;
4219
4220                 default:
4221                         break;
4222                 }
4223         }
4224 }
4225
4226 /* tp->lock is held. */
4227 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4228 {
4229         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4230                 switch (kind) {
4231                 case RESET_KIND_INIT:
4232                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4233                                       DRV_STATE_START);
4234                         break;
4235
4236                 case RESET_KIND_SHUTDOWN:
4237                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4238                                       DRV_STATE_UNLOAD);
4239                         break;
4240
4241                 case RESET_KIND_SUSPEND:
4242                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4243                                       DRV_STATE_SUSPEND);
4244                         break;
4245
4246                 default:
4247                         break;
4248                 }
4249         }
4250 }
4251
4252 static void tg3_stop_fw(struct tg3 *);
4253
4254 /* tp->lock is held. */
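     /* Full chip reset: issue a GRC core-clock reset, wait out the window
      * in which MMIO (and on some chips even PCI config space) is unusable,
      * restore the PCI, MSI and memory-arbiter state that the reset
      * clobbered, then wait for the bootcode to post its completion magic
      * in the firmware mailbox before re-probing the ASF configuration.
      */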
4255 static int tg3_chip_reset(struct tg3 *tp)
4256 {
4257         u32 val;
4258         void (*write_op)(struct tg3 *, u32, u32);
4259         int i;
4260
4261         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4262                 tg3_nvram_lock(tp);
4263
4264         /*
4265          * We must avoid the readl() that normally takes place.
4266          * It locks machines, causes machine checks, and other
4267          * fun things.  So, temporarily disable the 5701
4268          * hardware workaround while we do the reset.
4269          */
4270         write_op = tp->write32;
4271         if (write_op == tg3_write_flush_reg32)
4272                 tp->write32 = tg3_write32;
4273
4274         /* do the reset */
4275         val = GRC_MISC_CFG_CORECLK_RESET;
4276
4277         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4278                 if (tr32(0x7e2c) == 0x60) {
4279                         tw32(0x7e2c, 0x20);
4280                 }
4281                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4282                         tw32(GRC_MISC_CFG, (1 << 29));
4283                         val |= (1 << 29);
4284                 }
4285         }
4286
4287         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4288                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4289         tw32(GRC_MISC_CFG, val);
4290
4291         /* restore 5701 hardware bug workaround write method */
4292         tp->write32 = write_op;
4293
4294         /* Unfortunately, we have to delay before the PCI read back.
4295          * Some 575X chips will not even respond to a PCI cfg access
4296          * when the reset command is given to the chip.
4297          *
4298          * How do these hardware designers expect things to work
4299          * properly if the PCI write is posted for a long period
4300          * of time?  It is always necessary to have some method by
4301          * which a register read back can occur to push the write
4302          * out which does the reset.
4303          *
4304          * For most tg3 variants the trick below has worked.
4305          * Ho hum...
4306          */
4307         udelay(120);
4308
4309         /* Flush PCI posted writes.  The normal MMIO registers
4310          * are inaccessible at this time so this is the only
4311          * way to do this reliably (actually, this is no longer
4312          * the case, see above).  I tried to use indirect
4313          * register read/write but this upset some 5701 variants.
4314          */
4315         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4316
4317         udelay(120);
4318
4319         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4320                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4321                         int i;
4322                         u32 cfg_val;
4323
4324                         /* Wait for link training to complete.  */
4325                         for (i = 0; i < 5000; i++)
4326                                 udelay(100);
4327
4328                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4329                         pci_write_config_dword(tp->pdev, 0xc4,
4330                                                cfg_val | (1 << 15));
4331                 }
4332                 /* Set PCIE max payload size and clear error status.  */
4333                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4334         }
4335
4336         /* Re-enable indirect register accesses. */
4337         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4338                                tp->misc_host_ctrl);
4339
4340         /* Set MAX PCI retry to zero. */
4341         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4342         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4343             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4344                 val |= PCISTATE_RETRY_SAME_DMA;
4345         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4346
4347         pci_restore_state(tp->pdev);
4348
4349         /* Make sure PCI-X relaxed ordering bit is clear. */
4350         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4351         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4352         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4353
4354         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4355                 u32 val;
4356
4357                 /* Chip reset on 5780 will reset MSI enable bit,
4358                  * so we need to restore it.
4359                  */
4360                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4361                         u16 ctrl;
4362
4363                         pci_read_config_word(tp->pdev,
4364                                              tp->msi_cap + PCI_MSI_FLAGS,
4365                                              &ctrl);
4366                         pci_write_config_word(tp->pdev,
4367                                               tp->msi_cap + PCI_MSI_FLAGS,
4368                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4369                         val = tr32(MSGINT_MODE);
4370                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4371                 }
4372
4373                 val = tr32(MEMARB_MODE);
4374                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4375
4376         } else
4377                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4378
4379         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4380                 tg3_stop_fw(tp);
4381                 tw32(0x5000, 0x400);
4382         }
4383
4384         tw32(GRC_MODE, tp->grc_mode);
4385
4386         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4387                 u32 val = tr32(0xc4);
4388
4389                 tw32(0xc4, val | (1 << 15));
4390         }
4391
4392         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4393             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4394                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4395                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4396                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4397                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4398         }
4399
4400         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4401                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4402                 tw32_f(MAC_MODE, tp->mac_mode);
4403         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4404                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4405                 tw32_f(MAC_MODE, tp->mac_mode);
4406         } else
4407                 tw32_f(MAC_MODE, 0);
4408         udelay(40);
4409
4410         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4411                 /* Wait for firmware initialization to complete. */
4412                 for (i = 0; i < 100000; i++) {
4413                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4414                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4415                                 break;
4416                         udelay(10);
4417                 }
4418                 if (i >= 100000) {
4419                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4420                                "firmware will not restart magic=%08x\n",
4421                                tp->dev->name, val);
4422                         return -ENODEV;
4423                 }
4424         }
4425
4426         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4427             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4428                 u32 val = tr32(0x7c00);
4429
4430                 tw32(0x7c00, val | (1 << 25));
4431         }
4432
4433         /* Reprobe ASF enable state.  */
4434         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4435         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4436         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4437         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4438                 u32 nic_cfg;
4439
4440                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4441                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4442                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4443                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4444                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4445                 }
4446         }
4447
4448         return 0;
4449 }
4450
4451 /* tp->lock is held. */
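     /* If ASF firmware is running, ask it to pause: write PAUSE_FW to the
      * firmware command mailbox, raise the RX CPU event bit, and give the
      * RX CPU up to ~100us to acknowledge by clearing that bit.
      */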
4452 static void tg3_stop_fw(struct tg3 *tp)
4453 {
4454         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4455                 u32 val;
4456                 int i;
4457
4458                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4459                 val = tr32(GRC_RX_CPU_EVENT);
4460                 val |= (1 << 14);
4461                 tw32(GRC_RX_CPU_EVENT, val);
4462
4463                 /* Wait for RX cpu to ACK the event.  */
4464                 for (i = 0; i < 100; i++) {
4465                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4466                                 break;
4467                         udelay(1);
4468                 }
4469         }
4470 }
4471
4472 /* tp->lock is held. */
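     /* Bring the chip down in the required order: pause the ASF firmware,
      * write the pre-reset signature, quiesce the MAC/DMA blocks, reset the
      * chip, then write the legacy and post-reset signatures.
      */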
4473 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4474 {
4475         int err;
4476
4477         tg3_stop_fw(tp);
4478
4479         tg3_write_sig_pre_reset(tp, kind);
4480
4481         tg3_abort_hw(tp, silent);
4482         err = tg3_chip_reset(tp);
4483
4484         tg3_write_sig_legacy(tp, kind);
4485         tg3_write_sig_post_reset(tp, kind);
4486
4487         if (err)
4488                 return err;
4489
4490         return 0;
4491 }
4492
4493 #define TG3_FW_RELEASE_MAJOR    0x0
4494 #define TG3_FW_RELEASE_MINOR    0x0
4495 #define TG3_FW_RELEASE_FIX      0x0
4496 #define TG3_FW_START_ADDR       0x08000000
4497 #define TG3_FW_TEXT_ADDR        0x08000000
4498 #define TG3_FW_TEXT_LEN         0x9c0
4499 #define TG3_FW_RODATA_ADDR      0x080009c0
4500 #define TG3_FW_RODATA_LEN       0x60
4501 #define TG3_FW_DATA_ADDR        0x08000a40
4502 #define TG3_FW_DATA_LEN         0x20
4503 #define TG3_FW_SBSS_ADDR        0x08000a60
4504 #define TG3_FW_SBSS_LEN         0xc
4505 #define TG3_FW_BSS_ADDR         0x08000a70
4506 #define TG3_FW_BSS_LEN          0x10
4507
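     /* Image for the 5701_A0 firmware fix, one 32-bit word per entry.
      * tg3_load_5701_a0_firmware_fix() below copies it into the on-chip
      * RX and TX CPU scratch memory using the TG3_FW_* layout above.
      */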
4508 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4509         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4510         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4511         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4512         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4513         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4514         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4515         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4516         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4517         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4518         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4519         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4520         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4521         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4522         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4523         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4524         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4525         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4526         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4527         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4528         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4529         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4530         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4531         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4532         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4533         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4534         0, 0, 0, 0, 0, 0,
4535         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4536         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4537         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4538         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4539         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4540         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4541         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4542         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4543         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4544         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4545         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4546         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4547         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4548         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4549         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4550         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4551         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4552         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4553         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4554         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4555         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4556         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4557         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4558         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4559         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4560         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4561         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4562         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4563         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4564         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4565         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4566         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4567         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4568         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4569         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4570         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4571         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4572         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4573         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4574         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4575         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4576         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4577         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4578         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4579         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4580         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4581         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4582         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4583         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4584         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4585         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4586         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4587         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4588         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4589         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4590         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4591         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4592         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4593         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4594         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4595         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4596         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4597         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4598         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4599         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4600 };
4601
4602 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4603         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4604         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4605         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4606         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4607         0x00000000
4608 };
4609
4610 #if 0 /* All zeros, don't eat up space with it. */
4611 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4612         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4613         0x00000000, 0x00000000, 0x00000000, 0x00000000
4614 };
4615 #endif
4616
4617 #define RX_CPU_SCRATCH_BASE     0x30000
4618 #define RX_CPU_SCRATCH_SIZE     0x04000
4619 #define TX_CPU_SCRATCH_BASE     0x34000
4620 #define TX_CPU_SCRATCH_SIZE     0x04000
4621
4622 /* tp->lock is held. */
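     /* Halt the embedded RX or TX CPU: assert CPU_MODE_HALT and poll up to
      * 10000 times for the halt bit to stick.  Halting the TX CPU is a
      * BUG() on 5705-and-later chips, which never have TX CPU firmware
      * loaded (see tg3_load_firmware_cpu() below).
      */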
4623 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4624 {
4625         int i;
4626
4627         if (offset == TX_CPU_BASE &&
4628             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4629                 BUG();
4630
4631         if (offset == RX_CPU_BASE) {
4632                 for (i = 0; i < 10000; i++) {
4633                         tw32(offset + CPU_STATE, 0xffffffff);
4634                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4635                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4636                                 break;
4637                 }
4638
4639                 tw32(offset + CPU_STATE, 0xffffffff);
4640                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4641                 udelay(10);
4642         } else {
4643                 for (i = 0; i < 10000; i++) {
4644                         tw32(offset + CPU_STATE, 0xffffffff);
4645                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4646                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4647                                 break;
4648                 }
4649         }
4650
4651         if (i >= 10000) {
4652                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4653                        "%s CPU\n",
4654                        tp->dev->name,
4655                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4656                 return -ENODEV;
4657         }
4658         return 0;
4659 }
4660
4661 struct fw_info {
4662         unsigned int text_base;
4663         unsigned int text_len;
4664         u32 *text_data;
4665         unsigned int rodata_base;
4666         unsigned int rodata_len;
4667         u32 *rodata_data;
4668         unsigned int data_base;
4669         unsigned int data_len;
4670         u32 *data_data;
4671 };
4672
4673 /* tp->lock is held. */
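     /* Copy one firmware image into a CPU's scratch memory: halt the CPU
      * (under the NVRAM lock), zero the scratch area, then write the text,
      * rodata and data sections at offsets taken from the low 16 bits of
      * each section's load address.  The caller restarts the CPU.
      */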
4674 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4675                                  int cpu_scratch_size, struct fw_info *info)
4676 {
4677         int err, i;
4678         void (*write_op)(struct tg3 *, u32, u32);
4679
4680         if (cpu_base == TX_CPU_BASE &&
4681             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4682                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4683                        "TX cpu firmware on %s which is 5705.\n",
4684                        tp->dev->name);
4685                 return -EINVAL;
4686         }
4687
4688         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4689                 write_op = tg3_write_mem;
4690         else
4691                 write_op = tg3_write_indirect_reg32;
4692
4693         /* It is possible that bootcode is still loading at this point.
4694          * Get the nvram lock first before halting the cpu.
4695          */
4696         tg3_nvram_lock(tp);
4697         err = tg3_halt_cpu(tp, cpu_base);
4698         tg3_nvram_unlock(tp);
4699         if (err)
4700                 goto out;
4701
4702         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4703                 write_op(tp, cpu_scratch_base + i, 0);
4704         tw32(cpu_base + CPU_STATE, 0xffffffff);
4705         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4706         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4707                 write_op(tp, (cpu_scratch_base +
4708                               (info->text_base & 0xffff) +
4709                               (i * sizeof(u32))),
4710                          (info->text_data ?
4711                           info->text_data[i] : 0));
4712         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4713                 write_op(tp, (cpu_scratch_base +
4714                               (info->rodata_base & 0xffff) +
4715                               (i * sizeof(u32))),
4716                          (info->rodata_data ?
4717                           info->rodata_data[i] : 0));
4718         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4719                 write_op(tp, (cpu_scratch_base +
4720                               (info->data_base & 0xffff) +
4721                               (i * sizeof(u32))),
4722                          (info->data_data ?
4723                           info->data_data[i] : 0));
4724
4725         err = 0;
4726
4727 out:
4728         return err;
4729 }
4730
4731 /* tp->lock is held. */
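     /* Load the fix-up firmware above into both the RX and TX CPU scratch
      * areas, point the RX CPU's program counter at TG3_FW_TEXT_ADDR
      * (retrying up to 5 times for the write to take), and then un-halt
      * only the RX CPU.
      */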
4732 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4733 {
4734         struct fw_info info;
4735         int err, i;
4736
4737         info.text_base = TG3_FW_TEXT_ADDR;
4738         info.text_len = TG3_FW_TEXT_LEN;
4739         info.text_data = &tg3FwText[0];
4740         info.rodata_base = TG3_FW_RODATA_ADDR;
4741         info.rodata_len = TG3_FW_RODATA_LEN;
4742         info.rodata_data = &tg3FwRodata[0];
4743         info.data_base = TG3_FW_DATA_ADDR;
4744         info.data_len = TG3_FW_DATA_LEN;
4745         info.data_data = NULL;
4746
4747         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4748                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4749                                     &info);
4750         if (err)
4751                 return err;
4752
4753         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4754                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4755                                     &info);
4756         if (err)
4757                 return err;
4758
4759         /* Now startup only the RX cpu. */
4760         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4761         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4762
4763         for (i = 0; i < 5; i++) {
4764                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4765                         break;
4766                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4767                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4768                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4769                 udelay(1000);
4770         }
4771         if (i >= 5) {
4772                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4773                        "to set RX CPU PC, is %08x should be %08x\n",
4774                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4775                        TG3_FW_TEXT_ADDR);
4776                 return -ENODEV;
4777         }
4778         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4779         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4780
4781         return 0;
4782 }
4783
4784 #if TG3_TSO_SUPPORT != 0
4785
4786 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4787 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4788 #define TG3_TSO_FW_RELEASE_FIX          0x0
4789 #define TG3_TSO_FW_START_ADDR           0x08000000
4790 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4791 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4792 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4793 #define TG3_TSO_FW_RODATA_LEN           0x60
4794 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4795 #define TG3_TSO_FW_DATA_LEN             0x30
4796 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4797 #define TG3_TSO_FW_SBSS_LEN             0x2c
4798 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4799 #define TG3_TSO_FW_BSS_LEN              0x894
4800
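     /* TSO firmware image (text segment), one 32-bit word per entry; the
      * TG3_TSO_FW_* constants above describe the section layout.  5705-class
      * chips use the separate, smaller image further below instead.
      */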
4801 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4802         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4803         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4804         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4805         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4806         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4807         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4808         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4809         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4810         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4811         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4812         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4813         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4814         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4815         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4816         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4817         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4818         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4819         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4820         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4821         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4822         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4823         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4824         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4825         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4826         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4827         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4828         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4829         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4830         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4831         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4832         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4833         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4834         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4835         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4836         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4837         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4838         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4839         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4840         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4841         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4842         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4843         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4844         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4845         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4846         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4847         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4848         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4849         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4850         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4851         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4852         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4853         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4854         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4855         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4856         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4857         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4858         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4859         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4860         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4861         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4862         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4863         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4864         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4865         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4866         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4867         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4868         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4869         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4870         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4871         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4872         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4873         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4874         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4875         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4876         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4877         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4878         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4879         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4880         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4881         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4882         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4883         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4884         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4885         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4886         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4887         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4888         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4889         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4890         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4891         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4892         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4893         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4894         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4895         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4896         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4897         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4898         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4899         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4900         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4901         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4902         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4903         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4904         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4905         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4906         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4907         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4908         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4909         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4910         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4911         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4912         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4913         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4914         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4915         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4916         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4917         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4918         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4919         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4920         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4921         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4922         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4923         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4924         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4925         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4926         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4927         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4928         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4929         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4930         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4931         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4932         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4933         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4934         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4935         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4936         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4937         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4938         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4939         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4940         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4941         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4942         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4943         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4944         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4945         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4946         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4947         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4948         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4949         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4950         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4951         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4952         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4953         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4954         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4955         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4956         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4957         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4958         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4959         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4960         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4961         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4962         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4963         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4964         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4965         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4966         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4967         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4968         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4969         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4970         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4971         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4972         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4973         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4974         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4975         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4976         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4977         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4978         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4979         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4980         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4981         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4982         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4983         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4984         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4985         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4986         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4987         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4988         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4989         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4990         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4991         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4992         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4993         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4994         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4995         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4996         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4997         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4998         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4999         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5000         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5001         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5002         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5003         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5004         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5005         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5006         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5007         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5008         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5009         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5010         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5011         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5012         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5013         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5014         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5015         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5016         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5017         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5018         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5019         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5020         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5021         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5022         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5023         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5024         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5025         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5026         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5027         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5028         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5029         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5030         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5031         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5032         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5033         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5034         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5035         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5036         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5037         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5038         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5039         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5040         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5041         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5042         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5043         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5044         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5045         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5046         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5047         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5048         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5049         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5050         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5051         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5052         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5053         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5054         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5055         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5056         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5057         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5058         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5059         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5060         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5061         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5062         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5063         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5064         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5065         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5066         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5067         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5068         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5069         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5070         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5071         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5072         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5073         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5074         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5075         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5076         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5077         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5078         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5079         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5080         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5081         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5082         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5083         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5084         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5085         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5086 };
5087
5088 static u32 tg3TsoFwRodata[] = {
5089         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5090         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5091         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5092         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5093         0x00000000,
5094 };
5095
5096 static u32 tg3TsoFwData[] = {
5097         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5098         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5099         0x00000000,
5100 };
5101
5102 /* 5705 needs a special version of the TSO firmware.  */
5103 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5104 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5105 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5106 #define TG3_TSO5_FW_START_ADDR          0x00010000
5107 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5108 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5109 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5110 #define TG3_TSO5_FW_RODATA_LEN          0x50
5111 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5112 #define TG3_TSO5_FW_DATA_LEN            0x20
5113 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5114 #define TG3_TSO5_FW_SBSS_LEN            0x28
5115 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5116 #define TG3_TSO5_FW_BSS_LEN             0x88
5117
5118 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5119         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5120         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5121         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5122         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5123         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5124         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5125         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5126         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5127         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5128         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5129         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5130         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5131         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5132         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5133         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5134         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5135         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5136         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5137         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5138         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5139         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5140         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5141         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5142         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5143         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5144         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5145         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5146         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5147         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5148         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5149         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5150         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5151         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5152         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5153         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5154         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5155         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5156         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5157         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5158         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5159         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5160         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5161         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5162         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5163         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5164         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5165         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5166         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5167         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5168         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5169         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5170         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5171         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5172         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5173         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5174         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5175         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5176         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5177         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5178         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5179         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5180         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5181         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5182         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5183         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5184         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5185         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5186         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5187         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5188         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5189         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5190         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5191         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5192         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5193         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5194         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5195         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5196         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5197         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5198         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5199         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5200         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5201         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5202         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5203         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5204         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5205         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5206         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5207         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5208         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5209         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5210         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5211         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5212         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5213         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5214         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5215         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5216         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5217         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5218         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5219         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5220         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5221         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5222         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5223         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5224         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5225         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5226         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5227         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5228         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5229         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5230         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5231         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5232         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5233         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5234         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5235         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5236         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5237         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5238         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5239         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5240         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5241         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5242         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5243         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5244         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5245         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5246         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5247         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5248         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5249         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5250         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5251         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5252         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5253         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5254         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5255         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5256         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5257         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5258         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5259         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5260         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5261         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5262         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5263         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5264         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5265         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5266         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5267         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5268         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5269         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5270         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5271         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5272         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5273         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5274         0x00000000, 0x00000000, 0x00000000,
5275 };
5276
5277 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5278         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5279         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5280         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5281         0x00000000, 0x00000000, 0x00000000,
5282 };
5283
5284 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5285         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5286         0x00000000, 0x00000000, 0x00000000,
5287 };
5288
5289 /* tp->lock is held. */
5290 static int tg3_load_tso_firmware(struct tg3 *tp)
5291 {
5292         struct fw_info info;
5293         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5294         int err, i;
5295
5296         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5297                 return 0;
5298
5299         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5300                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5301                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5302                 info.text_data = &tg3Tso5FwText[0];
5303                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5304                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5305                 info.rodata_data = &tg3Tso5FwRodata[0];
5306                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5307                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5308                 info.data_data = &tg3Tso5FwData[0];
5309                 cpu_base = RX_CPU_BASE;
5310                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5311                 cpu_scratch_size = (info.text_len +
5312                                     info.rodata_len +
5313                                     info.data_len +
5314                                     TG3_TSO5_FW_SBSS_LEN +
5315                                     TG3_TSO5_FW_BSS_LEN);
5316         } else {
5317                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5318                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5319                 info.text_data = &tg3TsoFwText[0];
5320                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5321                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5322                 info.rodata_data = &tg3TsoFwRodata[0];
5323                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5324                 info.data_len = TG3_TSO_FW_DATA_LEN;
5325                 info.data_data = &tg3TsoFwData[0];
5326                 cpu_base = TX_CPU_BASE;
5327                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5328                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5329         }
5330
5331         err = tg3_load_firmware_cpu(tp, cpu_base,
5332                                     cpu_scratch_base, cpu_scratch_size,
5333                                     &info);
5334         if (err)
5335                 return err;
5336
5337         /* Now start up the cpu. */
5338         tw32(cpu_base + CPU_STATE, 0xffffffff);
5339         tw32_f(cpu_base + CPU_PC,    info.text_base);
5340
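        /* Verify that the CPU latched the new program counter; if it did
         * not, halt the CPU, rewrite the PC and retry a few times before
         * giving up.
         */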
5341         for (i = 0; i < 5; i++) {
5342                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5343                         break;
5344                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5345                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5346                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5347                 udelay(1000);
5348         }
5349         if (i >= 5) {
5350                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5351                        "to set CPU PC, is %08x should be %08x\n",
5352                        tp->dev->name, tr32(cpu_base + CPU_PC),
5353                        info.text_base);
5354                 return -ENODEV;
5355         }
5356         tw32(cpu_base + CPU_STATE, 0xffffffff);
5357         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5358         return 0;
5359 }
5360
5361 #endif /* TG3_TSO_SUPPORT != 0 */
5362
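/* The hardware takes the station address as a 16-bit high word (bytes 0-1)
 * and a 32-bit low word (bytes 2-5).  The same value is written into all
 * four MAC_ADDR slots, and the byte sum seeds the transmit backoff generator.
 */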
5363 /* tp->lock is held. */
5364 static void __tg3_set_mac_addr(struct tg3 *tp)
5365 {
5366         u32 addr_high, addr_low;
5367         int i;
5368
5369         addr_high = ((tp->dev->dev_addr[0] << 8) |
5370                      tp->dev->dev_addr[1]);
5371         addr_low = ((tp->dev->dev_addr[2] << 24) |
5372                     (tp->dev->dev_addr[3] << 16) |
5373                     (tp->dev->dev_addr[4] <<  8) |
5374                     (tp->dev->dev_addr[5] <<  0));
5375         for (i = 0; i < 4; i++) {
5376                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5377                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5378         }
5379
5380         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5381             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5382                 for (i = 0; i < 12; i++) {
5383                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5384                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5385                 }
5386         }
5387
5388         addr_high = (tp->dev->dev_addr[0] +
5389                      tp->dev->dev_addr[1] +
5390                      tp->dev->dev_addr[2] +
5391                      tp->dev->dev_addr[3] +
5392                      tp->dev->dev_addr[4] +
5393                      tp->dev->dev_addr[5]) &
5394                 TX_BACKOFF_SEED_MASK;
5395         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5396 }
5397
5398 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5399 {
5400         struct tg3 *tp = netdev_priv(dev);
5401         struct sockaddr *addr = p;
5402
5403         if (!is_valid_ether_addr(addr->sa_data))
5404                 return -EINVAL;
5405
5406         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5407
5408         spin_lock_bh(&tp->lock);
5409         __tg3_set_mac_addr(tp);
5410         spin_unlock_bh(&tp->lock);
5411
5412         return 0;
5413 }
5414
5415 /* tp->lock is held. */
5416 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5417                            dma_addr_t mapping, u32 maxlen_flags,
5418                            u32 nic_addr)
5419 {
5420         tg3_write_mem(tp,
5421                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5422                       ((u64) mapping >> 32));
5423         tg3_write_mem(tp,
5424                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5425                       ((u64) mapping & 0xffffffff));
5426         tg3_write_mem(tp,
5427                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5428                        maxlen_flags);
5429
5430         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5431                 tg3_write_mem(tp,
5432                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5433                               nic_addr);
5434 }
5435
5436 static void __tg3_set_rx_mode(struct net_device *);
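/* Program the host coalescing engine from the ethtool_coalesce parameters.
 * The per-interrupt tick and statistics coalescing registers are only
 * written on pre-5705 chips.
 */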
5437 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5438 {
5439         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5440         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5441         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5442         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5443         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5444                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5445                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5446         }
5447         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5448         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5449         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5450                 u32 val = ec->stats_block_coalesce_usecs;
5451
5452                 if (!netif_carrier_ok(tp->dev))
5453                         val = 0;
5454
5455                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5456         }
5457 }
5458
5459 /* tp->lock is held. */
5460 static int tg3_reset_hw(struct tg3 *tp)
5461 {
5462         u32 val, rdmac_mode;
5463         int i, err, limit;
5464
5465         tg3_disable_ints(tp);
5466
5467         tg3_stop_fw(tp);
5468
5469         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5470
5471         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5472                 tg3_abort_hw(tp, 1);
5473         }
5474
5475         err = tg3_chip_reset(tp);
5476         if (err)
5477                 return err;
5478
5479         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5480
5481         /* This works around an issue with Athlon chipsets on
5482          * B3 tigon3 silicon.  This bit has no effect on any
5483          * other revision.  But do not set this on PCI Express
5484          * chips.
5485          */
5486         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5487                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5488         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5489
5490         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5491             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5492                 val = tr32(TG3PCI_PCISTATE);
5493                 val |= PCISTATE_RETRY_SAME_DMA;
5494                 tw32(TG3PCI_PCISTATE, val);
5495         }
5496
5497         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5498                 /* Enable some hw fixes.  */
5499                 val = tr32(TG3PCI_MSI_DATA);
5500                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5501                 tw32(TG3PCI_MSI_DATA, val);
5502         }
5503
5504         /* Descriptor ring init may make accesses to the
5505          * NIC SRAM area to setup the TX descriptors, so we
5506          * can only do this after the hardware has been
5507          * successfully reset.
5508          */
5509         tg3_init_rings(tp);
5510
5511         /* This value is determined during the probe time DMA
5512          * engine test, tg3_test_dma.
5513          */
5514         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5515
5516         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5517                           GRC_MODE_4X_NIC_SEND_RINGS |
5518                           GRC_MODE_NO_TX_PHDR_CSUM |
5519                           GRC_MODE_NO_RX_PHDR_CSUM);
5520         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5521         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5522                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5523         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5524                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5525
5526         tw32(GRC_MODE,
5527              tp->grc_mode |
5528              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5529
5530         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
5531         val = tr32(GRC_MISC_CFG);
5532         val &= ~0xff;
5533         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5534         tw32(GRC_MISC_CFG, val);
5535
5536         /* Initialize MBUF/DESC pool. */
5537         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5538                 /* Do nothing.  */
5539         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5540                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5541                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5542                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5543                 else
5544                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5545                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5546                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5547         }
5548 #if TG3_TSO_SUPPORT != 0
5549         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5550                 int fw_len;
5551
5552                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5553                           TG3_TSO5_FW_RODATA_LEN +
5554                           TG3_TSO5_FW_DATA_LEN +
5555                           TG3_TSO5_FW_SBSS_LEN +
5556                           TG3_TSO5_FW_BSS_LEN);
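                /* Round the firmware footprint up to a 128-byte boundary and
                 * carve it out of the front of the 5705 MBUF pool.
                 */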
5557                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5558                 tw32(BUFMGR_MB_POOL_ADDR,
5559                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5560                 tw32(BUFMGR_MB_POOL_SIZE,
5561                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5562         }
5563 #endif
5564
5565         if (tp->dev->mtu <= ETH_DATA_LEN) {
5566                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5567                      tp->bufmgr_config.mbuf_read_dma_low_water);
5568                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5569                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5570                 tw32(BUFMGR_MB_HIGH_WATER,
5571                      tp->bufmgr_config.mbuf_high_water);
5572         } else {
5573                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5574                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5575                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5576                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5577                 tw32(BUFMGR_MB_HIGH_WATER,
5578                      tp->bufmgr_config.mbuf_high_water_jumbo);
5579         }
5580         tw32(BUFMGR_DMA_LOW_WATER,
5581              tp->bufmgr_config.dma_low_water);
5582         tw32(BUFMGR_DMA_HIGH_WATER,
5583              tp->bufmgr_config.dma_high_water);
5584
5585         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5586         for (i = 0; i < 2000; i++) {
5587                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5588                         break;
5589                 udelay(10);
5590         }
5591         if (i >= 2000) {
5592                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5593                        tp->dev->name);
5594                 return -ENODEV;
5595         }
5596
5597         /* Setup replenish threshold. */
5598         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5599
5600         /* Initialize TG3_BDINFO's at:
5601          *  RCVDBDI_STD_BD:     standard eth size rx ring
5602          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5603          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5604          *
5605          * like so:
5606          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5607          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5608          *                              ring attribute flags
5609          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5610          *
5611          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5612          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5613          *
5614          * The size of each ring is fixed in the firmware, but the location is
5615          * configurable.
5616          */
5617         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5618              ((u64) tp->rx_std_mapping >> 32));
5619         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5620              ((u64) tp->rx_std_mapping & 0xffffffff));
5621         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5622              NIC_SRAM_RX_BUFFER_DESC);
5623
5624         /* Don't even try to program the JUMBO/MINI buffer descriptor
5625          * configs on 5705.
5626          */
5627         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5628                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5629                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5630         } else {
5631                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5632                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5633
5634                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5635                      BDINFO_FLAGS_DISABLED);
5636
5637                 /* Setup replenish threshold. */
5638                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5639
5640                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5641                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5642                              ((u64) tp->rx_jumbo_mapping >> 32));
5643                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5644                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5645                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5646                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5647                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5648                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5649                 } else {
5650                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5651                              BDINFO_FLAGS_DISABLED);
5652                 }
5653
5654         }
5655
5656         /* There is only one send ring on 5705/5750, no need to explicitly
5657          * disable the others.
5658          */
5659         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5660                 /* Clear out send RCB ring in SRAM. */
5661                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5662                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5663                                       BDINFO_FLAGS_DISABLED);
5664         }
5665
5666         tp->tx_prod = 0;
5667         tp->tx_cons = 0;
5668         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5669         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5670
5671         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5672                        tp->tx_desc_mapping,
5673                        (TG3_TX_RING_SIZE <<
5674                         BDINFO_FLAGS_MAXLEN_SHIFT),
5675                        NIC_SRAM_TX_BUFFER_DESC);
5676
5677         /* There is only one receive return ring on 5705/5750, no need
5678          * to explicitly disable the others.
5679          */
5680         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5681                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5682                      i += TG3_BDINFO_SIZE) {
5683                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5684                                       BDINFO_FLAGS_DISABLED);
5685                 }
5686         }
5687
5688         tp->rx_rcb_ptr = 0;
5689         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5690
5691         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5692                        tp->rx_rcb_mapping,
5693                        (TG3_RX_RCB_RING_SIZE(tp) <<
5694                         BDINFO_FLAGS_MAXLEN_SHIFT),
5695                        0);
5696
5697         tp->rx_std_ptr = tp->rx_pending;
5698         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5699                      tp->rx_std_ptr);
5700
5701         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5702                                                 tp->rx_jumbo_pending : 0;
5703         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5704                      tp->rx_jumbo_ptr);
5705
5706         /* Initialize MAC address and backoff seed. */
5707         __tg3_set_mac_addr(tp);
5708
5709         /* MTU + ethernet header (14) + FCS (4) + optional VLAN tag (4) */
5710         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5711
5712         /* The slot time is changed by tg3_setup_phy if we
5713          * run at gigabit with half duplex.
5714          */
5715         tw32(MAC_TX_LENGTHS,
5716              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5717              (6 << TX_LENGTHS_IPG_SHIFT) |
5718              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5719
5720         /* Receive rules. */
5721         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5722         tw32(RCVLPC_CONFIG, 0x0181);
5723
5724         /* Calculate the RDMAC_MODE setting early; we need it to determine
5725          * the RCVLPC_STATS_ENABLE mask.
5726          */
5727         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5728                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5729                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5730                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5731                       RDMAC_MODE_LNGREAD_ENAB);
5732         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5733                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5734
5735         /* If statement applies to 5705 and 5750 PCI devices only */
5736         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5737              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5738             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5739                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5740                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5741                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5742                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5743                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5744                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5745                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5746                 }
5747         }
5748
5749         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5750                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5751
5752 #if TG3_TSO_SUPPORT != 0
5753         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5754                 rdmac_mode |= (1 << 27);
5755 #endif
5756
5757         /* Receive/send statistics. */
5758         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5759             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5760                 val = tr32(RCVLPC_STATS_ENABLE);
5761                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5762                 tw32(RCVLPC_STATS_ENABLE, val);
5763         } else {
5764                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5765         }
5766         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5767         tw32(SNDDATAI_STATSENAB, 0xffffff);
5768         tw32(SNDDATAI_STATSCTRL,
5769              (SNDDATAI_SCTRL_ENABLE |
5770               SNDDATAI_SCTRL_FASTUPD));
5771
5772         /* Setup host coalescing engine. */
5773         tw32(HOSTCC_MODE, 0);
5774         for (i = 0; i < 2000; i++) {
5775                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5776                         break;
5777                 udelay(10);
5778         }
5779
5780         __tg3_set_coalesce(tp, &tp->coal);
5781
5782         /* set status block DMA address */
5783         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5784              ((u64) tp->status_mapping >> 32));
5785         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5786              ((u64) tp->status_mapping & 0xffffffff));
5787
5788         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5789                 /* Status/statistics block address.  See tg3_timer,
5790                  * the tg3_periodic_fetch_stats call there, and
5791                  * tg3_get_stats to see how this works for 5705/5750 chips.
5792                  */
5793                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5794                      ((u64) tp->stats_mapping >> 32));
5795                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5796                      ((u64) tp->stats_mapping & 0xffffffff));
5797                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5798                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5799         }
5800
5801         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5802
5803         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5804         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5805         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5806                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5807
5808         /* Clear statistics/status block in chip, and status block in ram. */
5809         for (i = NIC_SRAM_STATS_BLK;
5810              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5811              i += sizeof(u32)) {
5812                 tg3_write_mem(tp, i, 0);
5813                 udelay(40);
5814         }
5815         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5816
5817         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5818                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5819                 /* reset to prevent losing 1st rx packet intermittently */
5820                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5821                 udelay(10);
5822         }
5823
5824         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5825                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5826         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5827         udelay(40);
5828
5829         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5830          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5831          * register to preserve the GPIO settings for LOMs. The GPIOs,
5832          * whether used as inputs or outputs, are set by boot code after
5833          * reset.
5834          */
5835         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5836                 u32 gpio_mask;
5837
5838                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5839                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5840
5841                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5842                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5843                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5844
5845                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5846
5847                 /* GPIO1 must be driven high for eeprom write protect */
5848                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5849                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5850         }
5851         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5852         udelay(100);
5853
5854         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5855         tp->last_tag = 0;
5856
5857         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5858                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5859                 udelay(40);
5860         }
5861
5862         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5863                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5864                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5865                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5866                WDMAC_MODE_LNGREAD_ENAB);
5867
5868         /* If statement applies to 5705 and 5750 PCI devices only */
5869         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5870              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5871             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5872                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5873                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5874                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5875                         /* nothing */
5876                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5877                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5878                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5879                         val |= WDMAC_MODE_RX_ACCEL;
5880                 }
5881         }
5882
5883         tw32_f(WDMAC_MODE, val);
5884         udelay(40);
5885
5886         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5887                 val = tr32(TG3PCI_X_CAPS);
5888                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5889                         val &= ~PCIX_CAPS_BURST_MASK;
5890                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5891                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5892                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5893                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5894                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5895                                 val |= (tp->split_mode_max_reqs <<
5896                                         PCIX_CAPS_SPLIT_SHIFT);
5897                 }
5898                 tw32(TG3PCI_X_CAPS, val);
5899         }
5900
5901         tw32_f(RDMAC_MODE, rdmac_mode);
5902         udelay(40);
5903
5904         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5905         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5906                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5907         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5908         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5909         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5910         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5911         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5912 #if TG3_TSO_SUPPORT != 0
5913         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5914                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5915 #endif
5916         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5917         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5918
5919         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5920                 err = tg3_load_5701_a0_firmware_fix(tp);
5921                 if (err)
5922                         return err;
5923         }
5924
5925 #if TG3_TSO_SUPPORT != 0
5926         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5927                 err = tg3_load_tso_firmware(tp);
5928                 if (err)
5929                         return err;
5930         }
5931 #endif
5932
5933         tp->tx_mode = TX_MODE_ENABLE;
5934         tw32_f(MAC_TX_MODE, tp->tx_mode);
5935         udelay(100);
5936
5937         tp->rx_mode = RX_MODE_ENABLE;
5938         tw32_f(MAC_RX_MODE, tp->rx_mode);
5939         udelay(10);
5940
5941         if (tp->link_config.phy_is_low_power) {
5942                 tp->link_config.phy_is_low_power = 0;
5943                 tp->link_config.speed = tp->link_config.orig_speed;
5944                 tp->link_config.duplex = tp->link_config.orig_duplex;
5945                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5946         }
5947
5948         tp->mi_mode = MAC_MI_MODE_BASE;
5949         tw32_f(MAC_MI_MODE, tp->mi_mode);
5950         udelay(80);
5951
5952         tw32(MAC_LED_CTRL, tp->led_ctrl);
5953
5954         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5955         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5956                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5957                 udelay(10);
5958         }
5959         tw32_f(MAC_RX_MODE, tp->rx_mode);
5960         udelay(10);
5961
5962         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5963                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5964                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5965                         /* Set drive transmission level to 1.2V  */
5966                         /* only if the signal pre-emphasis bit is not set  */
5967                         val = tr32(MAC_SERDES_CFG);
5968                         val &= 0xfffff000;
5969                         val |= 0x880;
5970                         tw32(MAC_SERDES_CFG, val);
5971                 }
5972                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5973                         tw32(MAC_SERDES_CFG, 0x616000);
5974         }
5975
5976         /* Prevent chip from dropping frames when flow control
5977          * is enabled.
5978          */
5979         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5980
5981         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5982             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5983                 /* Use hardware link auto-negotiation */
5984                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5985         }
5986
5987         err = tg3_setup_phy(tp, 1);
5988         if (err)
5989                 return err;
5990
5991         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5992                 u32 tmp;
5993
5994                 /* Clear CRC stats. */
5995                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5996                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5997                         tg3_readphy(tp, 0x14, &tmp);
5998                 }
5999         }
6000
6001         __tg3_set_rx_mode(tp->dev);
6002
6003         /* Initialize receive rules. */
6004         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6005         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6006         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6007         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6008
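        /* 5705 and newer chips (except the 5780 class) expose only eight
         * receive rule slots; older chips have sixteen.  When ASF firmware
         * is enabled, the last four of those slots are left untouched.
         */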
6009         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6010             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6011                 limit = 8;
6012         else
6013                 limit = 16;
6014         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6015                 limit -= 4;
6016         switch (limit) {
6017         case 16:
6018                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6019         case 15:
6020                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6021         case 14:
6022                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6023         case 13:
6024                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6025         case 12:
6026                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6027         case 11:
6028                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6029         case 10:
6030                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6031         case 9:
6032                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6033         case 8:
6034                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6035         case 7:
6036                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6037         case 6:
6038                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6039         case 5:
6040                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6041         case 4:
6042                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6043         case 3:
6044                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6045         case 2:
6046         case 1:
6047
6048         default:
6049                 break;
6050         }
6051
6052         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6053
6054         return 0;
6055 }
6056
6057 /* Called at device open time to get the chip ready for
6058  * packet processing.  Invoked with tp->lock held.
6059  */
6060 static int tg3_init_hw(struct tg3 *tp)
6061 {
6062         int err;
6063
6064         /* Force the chip into D0. */
6065         err = tg3_set_power_state(tp, 0);
6066         if (err)
6067                 goto out;
6068
6069         tg3_switch_clocks(tp);
6070
6071         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6072
6073         err = tg3_reset_hw(tp);
6074
6075 out:
6076         return err;
6077 }
6078
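/* Accumulate a 32-bit statistics register into a 64-bit (high/low) software
 * counter.  A carry out of the low word is detected by the low word becoming
 * smaller than the value that was just added.
 */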
6079 #define TG3_STAT_ADD32(PSTAT, REG) \
6080 do {    u32 __val = tr32(REG); \
6081         (PSTAT)->low += __val; \
6082         if ((PSTAT)->low < __val) \
6083                 (PSTAT)->high += 1; \
6084 } while (0)
6085
6086 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6087 {
6088         struct tg3_hw_stats *sp = tp->hw_stats;
6089
6090         if (!netif_carrier_ok(tp->dev))
6091                 return;
6092
6093         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6094         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6095         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6096         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6097         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6098         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6099         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6100         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6101         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6102         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6103         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6104         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6105         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6106
6107         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6108         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6109         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6110         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6111         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6112         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6113         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6114         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6115         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6116         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6117         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6118         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6119         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6120         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6121 }
6122
6123 static void tg3_timer(unsigned long __opaque)
6124 {
6125         struct tg3 *tp = (struct tg3 *) __opaque;
6126
6127         spin_lock(&tp->lock);
6128
6129         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6130                 /* All of this garbage is because, when using non-tagged
6131                  * IRQ status, the mailbox/status_block protocol the chip
6132                  * uses with the cpu is race prone.
6133                  */
6134                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6135                         tw32(GRC_LOCAL_CTRL,
6136                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6137                 } else {
6138                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6139                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6140                 }
6141
6142                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6143                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6144                         spin_unlock(&tp->lock);
6145                         schedule_work(&tp->reset_task);
6146                         return;
6147                 }
6148         }
6149
6150         /* This part only runs once per second. */
6151         if (!--tp->timer_counter) {
6152                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6153                         tg3_periodic_fetch_stats(tp);
6154
6155                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6156                         u32 mac_stat;
6157                         int phy_event;
6158
6159                         mac_stat = tr32(MAC_STATUS);
6160
6161                         phy_event = 0;
6162                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6163                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6164                                         phy_event = 1;
6165                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6166                                 phy_event = 1;
6167
6168                         if (phy_event)
6169                                 tg3_setup_phy(tp, 0);
6170                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6171                         u32 mac_stat = tr32(MAC_STATUS);
6172                         int need_setup = 0;
6173
6174                         if (netif_carrier_ok(tp->dev) &&
6175                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6176                                 need_setup = 1;
6177                         }
6178                         if (! netif_carrier_ok(tp->dev) &&
6179                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6180                                          MAC_STATUS_SIGNAL_DET))) {
6181                                 need_setup = 1;
6182                         }
6183                         if (need_setup) {
6184                                 tw32_f(MAC_MODE,
6185                                      (tp->mac_mode &
6186                                       ~MAC_MODE_PORT_MODE_MASK));
6187                                 udelay(40);
6188                                 tw32_f(MAC_MODE, tp->mac_mode);
6189                                 udelay(40);
6190                                 tg3_setup_phy(tp, 0);
6191                         }
6192                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6193                         tg3_serdes_parallel_detect(tp);
6194
6195                 tp->timer_counter = tp->timer_multiplier;
6196         }
6197
6198         /* Heartbeat is only sent once every 120 seconds.  */
6199         if (!--tp->asf_counter) {
6200                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6201                         u32 val;
6202
6203                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
6204                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6205                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
6206                         val = tr32(GRC_RX_CPU_EVENT);
6207                         val |= (1 << 14);
6208                         tw32(GRC_RX_CPU_EVENT, val);
6209                 }
6210                 tp->asf_counter = tp->asf_multiplier;
6211         }
6212
6213         spin_unlock(&tp->lock);
6214
6215         tp->timer.expires = jiffies + tp->timer_offset;
6216         add_timer(&tp->timer);
6217 }
6218
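/* Check that the device can actually raise an interrupt: install a test
 * handler, kick the coalescing engine with HOSTCC_MODE_NOW, and poll the
 * interrupt mailbox for a non-zero value before restoring the real handler.
 */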
6219 static int tg3_test_interrupt(struct tg3 *tp)
6220 {
6221         struct net_device *dev = tp->dev;
6222         int err, i;
6223         u32 int_mbox = 0;
6224
6225         if (!netif_running(dev))
6226                 return -ENODEV;
6227
6228         tg3_disable_ints(tp);
6229
6230         free_irq(tp->pdev->irq, dev);
6231
6232         err = request_irq(tp->pdev->irq, tg3_test_isr,
6233                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6234         if (err)
6235                 return err;
6236
6237         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6238         tg3_enable_ints(tp);
6239
6240         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6241                HOSTCC_MODE_NOW);
6242
6243         for (i = 0; i < 5; i++) {
6244                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6245                                         TG3_64BIT_REG_LOW);
6246                 if (int_mbox != 0)
6247                         break;
6248                 msleep(10);
6249         }
6250
6251         tg3_disable_ints(tp);
6252
6253         free_irq(tp->pdev->irq, dev);
6254
6255         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6256                 err = request_irq(tp->pdev->irq, tg3_msi,
6257                                   SA_SAMPLE_RANDOM, dev->name, dev);
6258         else {
6259                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6260                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6261                         fn = tg3_interrupt_tagged;
6262                 err = request_irq(tp->pdev->irq, fn,
6263                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6264         }
6265
6266         if (err)
6267                 return err;
6268
6269         if (int_mbox != 0)
6270                 return 0;
6271
6272         return -EIO;
6273 }
6274
6275 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
6276  * mode is successfully restored.
6277  */
6278 static int tg3_test_msi(struct tg3 *tp)
6279 {
6280         struct net_device *dev = tp->dev;
6281         int err;
6282         u16 pci_cmd;
6283
6284         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6285                 return 0;
6286
6287         /* Turn off SERR reporting in case MSI terminates with Master
6288          * Abort.
6289          */
6290         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6291         pci_write_config_word(tp->pdev, PCI_COMMAND,
6292                               pci_cmd & ~PCI_COMMAND_SERR);
6293
6294         err = tg3_test_interrupt(tp);
6295
6296         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6297
6298         if (!err)
6299                 return 0;
6300
6301         /* other failures */
6302         if (err != -EIO)
6303                 return err;
6304
6305         /* MSI test failed, go back to INTx mode */
6306         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6307                "switching to INTx mode. Please report this failure to "
6308                "the PCI maintainer and include system chipset information.\n",
6309                        tp->dev->name);
6310
6311         free_irq(tp->pdev->irq, dev);
6312         pci_disable_msi(tp->pdev);
6313
6314         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6315
6316         {
6317                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6318                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6319                         fn = tg3_interrupt_tagged;
6320
6321                 err = request_irq(tp->pdev->irq, fn,
6322                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6323         }
6324         if (err)
6325                 return err;
6326
6327         /* Need to reset the chip because the MSI cycle may have terminated
6328          * with Master Abort.
6329          */
6330         tg3_full_lock(tp, 1);
6331
6332         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6333         err = tg3_init_hw(tp);
6334
6335         tg3_full_unlock(tp);
6336
6337         if (err)
6338                 free_irq(tp->pdev->irq, dev);
6339
6340         return err;
6341 }
6342
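     /* Bring the interface up: allocate the DMA-consistent rings and status
      * block, enable MSI on 5750+ parts that support tagged status (but not
      * on 5750 AX/BX revisions), request the IRQ, program the hardware,
      * start the periodic timer (1Hz with tagged status, 10Hz otherwise),
      * verify MSI delivery with tg3_test_msi(), and finally enable
      * interrupts and the transmit queue.
      */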
6343 static int tg3_open(struct net_device *dev)
6344 {
6345         struct tg3 *tp = netdev_priv(dev);
6346         int err;
6347
6348         tg3_full_lock(tp, 0);
6349
6350         tg3_disable_ints(tp);
6351         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6352
6353         tg3_full_unlock(tp);
6354
6355         /* The placement of this call is tied
6356          * to the setup and use of Host TX descriptors.
6357          */
6358         err = tg3_alloc_consistent(tp);
6359         if (err)
6360                 return err;
6361
6362         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6363             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6364             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6365                 /* All MSI supporting chips should support tagged
6366                  * status.  Assert that this is the case.
6367                  */
6368                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6369                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6370                                "Not using MSI.\n", tp->dev->name);
6371                 } else if (pci_enable_msi(tp->pdev) == 0) {
6372                         u32 msi_mode;
6373
6374                         msi_mode = tr32(MSGINT_MODE);
6375                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6376                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6377                 }
6378         }
6379         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6380                 err = request_irq(tp->pdev->irq, tg3_msi,
6381                                   SA_SAMPLE_RANDOM, dev->name, dev);
6382         else {
6383                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6384                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6385                         fn = tg3_interrupt_tagged;
6386
6387                 err = request_irq(tp->pdev->irq, fn,
6388                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6389         }
6390
6391         if (err) {
6392                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6393                         pci_disable_msi(tp->pdev);
6394                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6395                 }
6396                 tg3_free_consistent(tp);
6397                 return err;
6398         }
6399
6400         tg3_full_lock(tp, 0);
6401
6402         err = tg3_init_hw(tp);
6403         if (err) {
6404                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6405                 tg3_free_rings(tp);
6406         } else {
6407                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6408                         tp->timer_offset = HZ;
6409                 else
6410                         tp->timer_offset = HZ / 10;
6411
6412                 BUG_ON(tp->timer_offset > HZ);
6413                 tp->timer_counter = tp->timer_multiplier =
6414                         (HZ / tp->timer_offset);
6415                 tp->asf_counter = tp->asf_multiplier =
6416                         ((HZ / tp->timer_offset) * 120);
6417
6418                 init_timer(&tp->timer);
6419                 tp->timer.expires = jiffies + tp->timer_offset;
6420                 tp->timer.data = (unsigned long) tp;
6421                 tp->timer.function = tg3_timer;
6422         }
6423
6424         tg3_full_unlock(tp);
6425
6426         if (err) {
6427                 free_irq(tp->pdev->irq, dev);
6428                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6429                         pci_disable_msi(tp->pdev);
6430                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6431                 }
6432                 tg3_free_consistent(tp);
6433                 return err;
6434         }
6435
6436         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6437                 err = tg3_test_msi(tp);
6438
6439                 if (err) {
6440                         tg3_full_lock(tp, 0);
6441
6442                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6443                                 pci_disable_msi(tp->pdev);
6444                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6445                         }
6446                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6447                         tg3_free_rings(tp);
6448                         tg3_free_consistent(tp);
6449
6450                         tg3_full_unlock(tp);
6451
6452                         return err;
6453                 }
6454         }
6455
6456         tg3_full_lock(tp, 0);
6457
6458         add_timer(&tp->timer);
6459         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6460         tg3_enable_ints(tp);
6461
6462         tg3_full_unlock(tp);
6463
6464         netif_start_queue(dev);
6465
6466         return 0;
6467 }
6468
6469 #if 0
6470 /*static*/ void tg3_dump_state(struct tg3 *tp)
6471 {
6472         u32 val32, val32_2, val32_3, val32_4, val32_5;
6473         u16 val16;
6474         int i;
6475
6476         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6477         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6478         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6479                val16, val32);
6480
6481         /* MAC block */
6482         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6483                tr32(MAC_MODE), tr32(MAC_STATUS));
6484         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6485                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6486         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6487                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6488         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6489                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6490
6491         /* Send data initiator control block */
6492         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6493                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6494         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6495                tr32(SNDDATAI_STATSCTRL));
6496
6497         /* Send data completion control block */
6498         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6499
6500         /* Send BD ring selector block */
6501         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6502                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6503
6504         /* Send BD initiator control block */
6505         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6506                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6507
6508         /* Send BD completion control block */
6509         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6510
6511         /* Receive list placement control block */
6512         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6513                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6514         printk("       RCVLPC_STATSCTRL[%08x]\n",
6515                tr32(RCVLPC_STATSCTRL));
6516
6517         /* Receive data and receive BD initiator control block */
6518         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6519                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6520
6521         /* Receive data completion control block */
6522         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6523                tr32(RCVDCC_MODE));
6524
6525         /* Receive BD initiator control block */
6526         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6527                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6528
6529         /* Receive BD completion control block */
6530         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6531                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6532
6533         /* Receive list selector control block */
6534         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6535                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6536
6537         /* Mbuf cluster free block */
6538         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6539                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6540
6541         /* Host coalescing control block */
6542         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6543                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6544         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6545                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6546                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6547         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6548                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6549                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6550         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6551                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6552         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6553                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6554
6555         /* Memory arbiter control block */
6556         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6557                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6558
6559         /* Buffer manager control block */
6560         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6561                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6562         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6563                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6564         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6565                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6566                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6567                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6568
6569         /* Read DMA control block */
6570         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6571                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6572
6573         /* Write DMA control block */
6574         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6575                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6576
6577         /* DMA completion block */
6578         printk("DEBUG: DMAC_MODE[%08x]\n",
6579                tr32(DMAC_MODE));
6580
6581         /* GRC block */
6582         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6583                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6584         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6585                tr32(GRC_LOCAL_CTRL));
6586
6587         /* TG3_BDINFOs */
6588         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6589                tr32(RCVDBDI_JUMBO_BD + 0x0),
6590                tr32(RCVDBDI_JUMBO_BD + 0x4),
6591                tr32(RCVDBDI_JUMBO_BD + 0x8),
6592                tr32(RCVDBDI_JUMBO_BD + 0xc));
6593         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6594                tr32(RCVDBDI_STD_BD + 0x0),
6595                tr32(RCVDBDI_STD_BD + 0x4),
6596                tr32(RCVDBDI_STD_BD + 0x8),
6597                tr32(RCVDBDI_STD_BD + 0xc));
6598         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6599                tr32(RCVDBDI_MINI_BD + 0x0),
6600                tr32(RCVDBDI_MINI_BD + 0x4),
6601                tr32(RCVDBDI_MINI_BD + 0x8),
6602                tr32(RCVDBDI_MINI_BD + 0xc));
6603
6604         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6605         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6606         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6607         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6608         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6609                val32, val32_2, val32_3, val32_4);
6610
6611         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6612         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6613         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6614         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6615         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6616                val32, val32_2, val32_3, val32_4);
6617
6618         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6619         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6620         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6621         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6622         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6623         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6624                val32, val32_2, val32_3, val32_4, val32_5);
6625
6626         /* SW status block */
6627         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6628                tp->hw_status->status,
6629                tp->hw_status->status_tag,
6630                tp->hw_status->rx_jumbo_consumer,
6631                tp->hw_status->rx_consumer,
6632                tp->hw_status->rx_mini_consumer,
6633                tp->hw_status->idx[0].rx_producer,
6634                tp->hw_status->idx[0].tx_consumer);
6635
6636         /* SW statistics block */
6637         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6638                ((u32 *)tp->hw_stats)[0],
6639                ((u32 *)tp->hw_stats)[1],
6640                ((u32 *)tp->hw_stats)[2],
6641                ((u32 *)tp->hw_stats)[3]);
6642
6643         /* Mailboxes */
6644         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6645                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6646                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6647                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6648                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6649
6650         /* NIC side send descriptors. */
6651         for (i = 0; i < 6; i++) {
6652                 unsigned long txd;
6653
6654                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6655                         + (i * sizeof(struct tg3_tx_buffer_desc));
6656                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6657                        i,
6658                        readl(txd + 0x0), readl(txd + 0x4),
6659                        readl(txd + 0x8), readl(txd + 0xc));
6660         }
6661
6662         /* NIC side RX descriptors. */
6663         for (i = 0; i < 6; i++) {
6664                 unsigned long rxd;
6665
6666                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6667                         + (i * sizeof(struct tg3_rx_buffer_desc));
6668                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6669                        i,
6670                        readl(rxd + 0x0), readl(rxd + 0x4),
6671                        readl(rxd + 0x8), readl(rxd + 0xc));
6672                 rxd += (4 * sizeof(u32));
6673                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6674                        i,
6675                        readl(rxd + 0x0), readl(rxd + 0x4),
6676                        readl(rxd + 0x8), readl(rxd + 0xc));
6677         }
6678
6679         for (i = 0; i < 6; i++) {
6680                 unsigned long rxd;
6681
6682                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6683                         + (i * sizeof(struct tg3_rx_buffer_desc));
6684                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6685                        i,
6686                        readl(rxd + 0x0), readl(rxd + 0x4),
6687                        readl(rxd + 0x8), readl(rxd + 0xc));
6688                 rxd += (4 * sizeof(u32));
6689                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6690                        i,
6691                        readl(rxd + 0x0), readl(rxd + 0x4),
6692                        readl(rxd + 0x8), readl(rxd + 0xc));
6693         }
6694 }
6695 #endif
6696
6697 static struct net_device_stats *tg3_get_stats(struct net_device *);
6698 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6699
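     /* Bring the interface down: stop the queue and the periodic timer,
      * halt the chip and free the rings under the full lock, release the
      * IRQ (and MSI vector), and snapshot the hardware counters into
      * net_stats_prev / estats_prev so the totals survive the statistics
      * block being freed.
      */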
6700 static int tg3_close(struct net_device *dev)
6701 {
6702         struct tg3 *tp = netdev_priv(dev);
6703
6704         netif_stop_queue(dev);
6705
6706         del_timer_sync(&tp->timer);
6707
6708         tg3_full_lock(tp, 1);
6709 #if 0
6710         tg3_dump_state(tp);
6711 #endif
6712
6713         tg3_disable_ints(tp);
6714
6715         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6716         tg3_free_rings(tp);
6717         tp->tg3_flags &=
6718                 ~(TG3_FLAG_INIT_COMPLETE |
6719                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6720         netif_carrier_off(tp->dev);
6721
6722         tg3_full_unlock(tp);
6723
6724         free_irq(tp->pdev->irq, dev);
6725         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6726                 pci_disable_msi(tp->pdev);
6727                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6728         }
6729
6730         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6731                sizeof(tp->net_stats_prev));
6732         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6733                sizeof(tp->estats_prev));
6734
6735         tg3_free_consistent(tp);
6736
6737         return 0;
6738 }
6739
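     /* The hardware keeps 64-bit statistics counters.  On 32-bit hosts only
      * the low word is returned (unsigned long is 32 bits); on 64-bit hosts
      * both halves are combined.
      */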
6740 static inline unsigned long get_stat64(tg3_stat64_t *val)
6741 {
6742         unsigned long ret;
6743
6744 #if (BITS_PER_LONG == 32)
6745         ret = val->low;
6746 #else
6747         ret = ((u64)val->high << 32) | ((u64)val->low);
6748 #endif
6749         return ret;
6750 }
6751
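     /* On 5700/5701 copper devices the CRC error count is taken from the
      * PHY (register 0x14, exposed by setting bit 15 of register 0x1e) and
      * accumulated in tp->phy_crc_errors; all other devices use the
      * rx_fcs_errors counter from the hardware statistics block.
      */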
6752 static unsigned long calc_crc_errors(struct tg3 *tp)
6753 {
6754         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6755
6756         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6757             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6758              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6759                 u32 val;
6760
6761                 spin_lock_bh(&tp->lock);
6762                 if (!tg3_readphy(tp, 0x1e, &val)) {
6763                         tg3_writephy(tp, 0x1e, val | 0x8000);
6764                         tg3_readphy(tp, 0x14, &val);
6765                 } else
6766                         val = 0;
6767                 spin_unlock_bh(&tp->lock);
6768
6769                 tp->phy_crc_errors += val;
6770
6771                 return tp->phy_crc_errors;
6772         }
6773
6774         return get_stat64(&hw_stats->rx_fcs_errors);
6775 }
6776
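     /* ethtool statistics are reported as the counters snapshotted at the
      * last tg3_close() plus the live hardware counters, so the totals keep
      * growing across down/up cycles.
      */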
6777 #define ESTAT_ADD(member) \
6778         estats->member =        old_estats->member + \
6779                                 get_stat64(&hw_stats->member)
6780
6781 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6782 {
6783         struct tg3_ethtool_stats *estats = &tp->estats;
6784         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6785         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6786
6787         if (!hw_stats)
6788                 return old_estats;
6789
6790         ESTAT_ADD(rx_octets);
6791         ESTAT_ADD(rx_fragments);
6792         ESTAT_ADD(rx_ucast_packets);
6793         ESTAT_ADD(rx_mcast_packets);
6794         ESTAT_ADD(rx_bcast_packets);
6795         ESTAT_ADD(rx_fcs_errors);
6796         ESTAT_ADD(rx_align_errors);
6797         ESTAT_ADD(rx_xon_pause_rcvd);
6798         ESTAT_ADD(rx_xoff_pause_rcvd);
6799         ESTAT_ADD(rx_mac_ctrl_rcvd);
6800         ESTAT_ADD(rx_xoff_entered);
6801         ESTAT_ADD(rx_frame_too_long_errors);
6802         ESTAT_ADD(rx_jabbers);
6803         ESTAT_ADD(rx_undersize_packets);
6804         ESTAT_ADD(rx_in_length_errors);
6805         ESTAT_ADD(rx_out_length_errors);
6806         ESTAT_ADD(rx_64_or_less_octet_packets);
6807         ESTAT_ADD(rx_65_to_127_octet_packets);
6808         ESTAT_ADD(rx_128_to_255_octet_packets);
6809         ESTAT_ADD(rx_256_to_511_octet_packets);
6810         ESTAT_ADD(rx_512_to_1023_octet_packets);
6811         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6812         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6813         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6814         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6815         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6816
6817         ESTAT_ADD(tx_octets);
6818         ESTAT_ADD(tx_collisions);
6819         ESTAT_ADD(tx_xon_sent);
6820         ESTAT_ADD(tx_xoff_sent);
6821         ESTAT_ADD(tx_flow_control);
6822         ESTAT_ADD(tx_mac_errors);
6823         ESTAT_ADD(tx_single_collisions);
6824         ESTAT_ADD(tx_mult_collisions);
6825         ESTAT_ADD(tx_deferred);
6826         ESTAT_ADD(tx_excessive_collisions);
6827         ESTAT_ADD(tx_late_collisions);
6828         ESTAT_ADD(tx_collide_2times);
6829         ESTAT_ADD(tx_collide_3times);
6830         ESTAT_ADD(tx_collide_4times);
6831         ESTAT_ADD(tx_collide_5times);
6832         ESTAT_ADD(tx_collide_6times);
6833         ESTAT_ADD(tx_collide_7times);
6834         ESTAT_ADD(tx_collide_8times);
6835         ESTAT_ADD(tx_collide_9times);
6836         ESTAT_ADD(tx_collide_10times);
6837         ESTAT_ADD(tx_collide_11times);
6838         ESTAT_ADD(tx_collide_12times);
6839         ESTAT_ADD(tx_collide_13times);
6840         ESTAT_ADD(tx_collide_14times);
6841         ESTAT_ADD(tx_collide_15times);
6842         ESTAT_ADD(tx_ucast_packets);
6843         ESTAT_ADD(tx_mcast_packets);
6844         ESTAT_ADD(tx_bcast_packets);
6845         ESTAT_ADD(tx_carrier_sense_errors);
6846         ESTAT_ADD(tx_discards);
6847         ESTAT_ADD(tx_errors);
6848
6849         ESTAT_ADD(dma_writeq_full);
6850         ESTAT_ADD(dma_write_prioq_full);
6851         ESTAT_ADD(rxbds_empty);
6852         ESTAT_ADD(rx_discards);
6853         ESTAT_ADD(rx_errors);
6854         ESTAT_ADD(rx_threshold_hit);
6855
6856         ESTAT_ADD(dma_readq_full);
6857         ESTAT_ADD(dma_read_prioq_full);
6858         ESTAT_ADD(tx_comp_queue_full);
6859
6860         ESTAT_ADD(ring_set_send_prod_index);
6861         ESTAT_ADD(ring_status_update);
6862         ESTAT_ADD(nic_irqs);
6863         ESTAT_ADD(nic_avoided_irqs);
6864         ESTAT_ADD(nic_tx_threshold_hit);
6865
6866         return estats;
6867 }
6868
6869 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6870 {
6871         struct tg3 *tp = netdev_priv(dev);
6872         struct net_device_stats *stats = &tp->net_stats;
6873         struct net_device_stats *old_stats = &tp->net_stats_prev;
6874         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6875
6876         if (!hw_stats)
6877                 return old_stats;
6878
6879         stats->rx_packets = old_stats->rx_packets +
6880                 get_stat64(&hw_stats->rx_ucast_packets) +
6881                 get_stat64(&hw_stats->rx_mcast_packets) +
6882                 get_stat64(&hw_stats->rx_bcast_packets);
6883                 
6884         stats->tx_packets = old_stats->tx_packets +
6885                 get_stat64(&hw_stats->tx_ucast_packets) +
6886                 get_stat64(&hw_stats->tx_mcast_packets) +
6887                 get_stat64(&hw_stats->tx_bcast_packets);
6888
6889         stats->rx_bytes = old_stats->rx_bytes +
6890                 get_stat64(&hw_stats->rx_octets);
6891         stats->tx_bytes = old_stats->tx_bytes +
6892                 get_stat64(&hw_stats->tx_octets);
6893
6894         stats->rx_errors = old_stats->rx_errors +
6895                 get_stat64(&hw_stats->rx_errors);
6896         stats->tx_errors = old_stats->tx_errors +
6897                 get_stat64(&hw_stats->tx_errors) +
6898                 get_stat64(&hw_stats->tx_mac_errors) +
6899                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6900                 get_stat64(&hw_stats->tx_discards);
6901
6902         stats->multicast = old_stats->multicast +
6903                 get_stat64(&hw_stats->rx_mcast_packets);
6904         stats->collisions = old_stats->collisions +
6905                 get_stat64(&hw_stats->tx_collisions);
6906
6907         stats->rx_length_errors = old_stats->rx_length_errors +
6908                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6909                 get_stat64(&hw_stats->rx_undersize_packets);
6910
6911         stats->rx_over_errors = old_stats->rx_over_errors +
6912                 get_stat64(&hw_stats->rxbds_empty);
6913         stats->rx_frame_errors = old_stats->rx_frame_errors +
6914                 get_stat64(&hw_stats->rx_align_errors);
6915         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6916                 get_stat64(&hw_stats->tx_discards);
6917         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6918                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6919
6920         stats->rx_crc_errors = old_stats->rx_crc_errors +
6921                 calc_crc_errors(tp);
6922
6923         stats->rx_missed_errors = old_stats->rx_missed_errors +
6924                 get_stat64(&hw_stats->rx_discards);
6925
6926         return stats;
6927 }
6928
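     /* Bit-serial CRC-32 over 'buf' using the reflected Ethernet polynomial
      * (0xedb88320), returning the complemented remainder.  Used below to
      * hash multicast addresses into the MAC hash registers.
      */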
6929 static inline u32 calc_crc(unsigned char *buf, int len)
6930 {
6931         u32 reg;
6932         u32 tmp;
6933         int j, k;
6934
6935         reg = 0xffffffff;
6936
6937         for (j = 0; j < len; j++) {
6938                 reg ^= buf[j];
6939
6940                 for (k = 0; k < 8; k++) {
6941                         tmp = reg & 0x01;
6942
6943                         reg >>= 1;
6944
6945                         if (tmp) {
6946                                 reg ^= 0xedb88320;
6947                         }
6948                 }
6949         }
6950
6951         return ~reg;
6952 }
6953
6954 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6955 {
6956         /* accept or reject all multicast frames */
6957         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6958         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6959         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6960         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6961 }
6962
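     /* Program the receive filters.  Promiscuous mode sets RX_MODE_PROMISC;
      * IFF_ALLMULTI (or an empty multicast list) opens or closes the hash
      * filter completely; otherwise each multicast address is hashed with
      * calc_crc() and the inverted CRC selects one of 128 filter bits: the
      * top two bits pick one of the four MAC_HASH registers and the low
      * five bits pick the bit within it.
      */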
6963 static void __tg3_set_rx_mode(struct net_device *dev)
6964 {
6965         struct tg3 *tp = netdev_priv(dev);
6966         u32 rx_mode;
6967
6968         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6969                                   RX_MODE_KEEP_VLAN_TAG);
6970
6971         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6972          * flag clear.
6973          */
6974 #if TG3_VLAN_TAG_USED
6975         if (!tp->vlgrp &&
6976             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6977                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6978 #else
6979         /* By definition, VLAN is always disabled in this
6980          * case.
6981          */
6982         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6983                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6984 #endif
6985
6986         if (dev->flags & IFF_PROMISC) {
6987                 /* Promiscuous mode. */
6988                 rx_mode |= RX_MODE_PROMISC;
6989         } else if (dev->flags & IFF_ALLMULTI) {
6990                 /* Accept all multicast. */
6991                 tg3_set_multi (tp, 1);
6992         } else if (dev->mc_count < 1) {
6993                 /* Reject all multicast. */
6994                 tg3_set_multi (tp, 0);
6995         } else {
6996                 /* Accept one or more multicast(s). */
6997                 struct dev_mc_list *mclist;
6998                 unsigned int i;
6999                 u32 mc_filter[4] = { 0, };
7000                 u32 regidx;
7001                 u32 bit;
7002                 u32 crc;
7003
7004                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7005                      i++, mclist = mclist->next) {
7006
7007                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7008                         bit = ~crc & 0x7f;
7009                         regidx = (bit & 0x60) >> 5;
7010                         bit &= 0x1f;
7011                         mc_filter[regidx] |= (1 << bit);
7012                 }
7013
7014                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7015                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7016                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7017                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7018         }
7019
7020         if (rx_mode != tp->rx_mode) {
7021                 tp->rx_mode = rx_mode;
7022                 tw32_f(MAC_RX_MODE, rx_mode);
7023                 udelay(10);
7024         }
7025 }
7026
7027 static void tg3_set_rx_mode(struct net_device *dev)
7028 {
7029         struct tg3 *tp = netdev_priv(dev);
7030
7031         tg3_full_lock(tp, 0);
7032         __tg3_set_rx_mode(dev);
7033         tg3_full_unlock(tp);
7034 }
7035
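     /* ethtool register dump: a 32KB buffer into which selected register
      * blocks are copied.  GET_REG32_LOOP()/GET_REG32_1() below place each
      * block at its natural register offset within the buffer, so regions
      * that are not dumped simply remain zero.
      */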
7036 #define TG3_REGDUMP_LEN         (32 * 1024)
7037
7038 static int tg3_get_regs_len(struct net_device *dev)
7039 {
7040         return TG3_REGDUMP_LEN;
7041 }
7042
7043 static void tg3_get_regs(struct net_device *dev,
7044                 struct ethtool_regs *regs, void *_p)
7045 {
7046         u32 *p = _p;
7047         struct tg3 *tp = netdev_priv(dev);
7048         u8 *orig_p = _p;
7049         int i;
7050
7051         regs->version = 0;
7052
7053         memset(p, 0, TG3_REGDUMP_LEN);
7054
7055         tg3_full_lock(tp, 0);
7056
7057 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7058 #define GET_REG32_LOOP(base,len)                \
7059 do {    p = (u32 *)(orig_p + (base));           \
7060         for (i = 0; i < len; i += 4)            \
7061                 __GET_REG32((base) + i);        \
7062 } while (0)
7063 #define GET_REG32_1(reg)                        \
7064 do {    p = (u32 *)(orig_p + (reg));            \
7065         __GET_REG32((reg));                     \
7066 } while (0)
7067
7068         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7069         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7070         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7071         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7072         GET_REG32_1(SNDDATAC_MODE);
7073         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7074         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7075         GET_REG32_1(SNDBDC_MODE);
7076         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7077         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7078         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7079         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7080         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7081         GET_REG32_1(RCVDCC_MODE);
7082         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7083         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7084         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7085         GET_REG32_1(MBFREE_MODE);
7086         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7087         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7088         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7089         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7090         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7091         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7092         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7093         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7094         GET_REG32_LOOP(FTQ_RESET, 0x120);
7095         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7096         GET_REG32_1(DMAC_MODE);
7097         GET_REG32_LOOP(GRC_MODE, 0x4c);
7098         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7099                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7100
7101 #undef __GET_REG32
7102 #undef GET_REG32_LOOP
7103 #undef GET_REG32_1
7104
7105         tg3_full_unlock(tp);
7106 }
7107
7108 static int tg3_get_eeprom_len(struct net_device *dev)
7109 {
7110         struct tg3 *tp = netdev_priv(dev);
7111
7112         return tp->nvram_size;
7113 }
7114
7115 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7116
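     /* NVRAM is read one 32-bit word at a time on 4-byte boundaries, so the
      * ethtool EEPROM read handles a misaligned head, the aligned middle
      * and a ragged tail separately, converting each word to little-endian
      * before copying out the bytes the caller asked for.
      */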
7117 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7118 {
7119         struct tg3 *tp = netdev_priv(dev);
7120         int ret;
7121         u8  *pd;
7122         u32 i, offset, len, val, b_offset, b_count;
7123
7124         offset = eeprom->offset;
7125         len = eeprom->len;
7126         eeprom->len = 0;
7127
7128         eeprom->magic = TG3_EEPROM_MAGIC;
7129
7130         if (offset & 3) {
7131                 /* adjustments to start on required 4 byte boundary */
7132                 b_offset = offset & 3;
7133                 b_count = 4 - b_offset;
7134                 if (b_count > len) {
7135                         /* i.e. offset=1 len=2 */
7136                         b_count = len;
7137                 }
7138                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7139                 if (ret)
7140                         return ret;
7141                 val = cpu_to_le32(val);
7142                 memcpy(data, ((char*)&val) + b_offset, b_count);
7143                 len -= b_count;
7144                 offset += b_count;
7145                 eeprom->len += b_count;
7146         }
7147
7148         /* read bytes up to the last 4-byte boundary */
7149         pd = &data[eeprom->len];
7150         for (i = 0; i < (len - (len & 3)); i += 4) {
7151                 ret = tg3_nvram_read(tp, offset + i, &val);
7152                 if (ret) {
7153                         eeprom->len += i;
7154                         return ret;
7155                 }
7156                 val = cpu_to_le32(val);
7157                 memcpy(pd + i, &val, 4);
7158         }
7159         eeprom->len += i;
7160
7161         if (len & 3) {
7162                 /* read the last bytes, which do not end on a 4-byte boundary */
7163                 pd = &data[eeprom->len];
7164                 b_count = len & 3;
7165                 b_offset = offset + len - b_count;
7166                 ret = tg3_nvram_read(tp, b_offset, &val);
7167                 if (ret)
7168                         return ret;
7169                 val = cpu_to_le32(val);
7170                 memcpy(pd, ((char*)&val), b_count);
7171                 eeprom->len += b_count;
7172         }
7173         return 0;
7174 }
7175
7176 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7177
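     /* For EEPROM writes the region is first widened to whole 32-bit words:
      * any partial leading or trailing word is read back from NVRAM and
      * merged with the caller's data in a temporary buffer before
      * tg3_nvram_write_block() is called.
      */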
7178 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7179 {
7180         struct tg3 *tp = netdev_priv(dev);
7181         int ret;
7182         u32 offset, len, b_offset, odd_len, start, end;
7183         u8 *buf;
7184
7185         if (eeprom->magic != TG3_EEPROM_MAGIC)
7186                 return -EINVAL;
7187
7188         offset = eeprom->offset;
7189         len = eeprom->len;
7190
7191         if ((b_offset = (offset & 3))) {
7192                 /* adjustments to start on required 4 byte boundary */
7193                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7194                 if (ret)
7195                         return ret;
7196                 start = cpu_to_le32(start);
7197                 len += b_offset;
7198                 offset &= ~3;
7199                 if (len < 4)
7200                         len = 4;
7201         }
7202
7203         odd_len = 0;
7204         if (len & 3) {
7205                 /* adjustments to end on required 4 byte boundary */
7206                 odd_len = 1;
7207                 len = (len + 3) & ~3;
7208                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7209                 if (ret)
7210                         return ret;
7211                 end = cpu_to_le32(end);
7212         }
7213
7214         buf = data;
7215         if (b_offset || odd_len) {
7216                 buf = kmalloc(len, GFP_KERNEL);
7217                 if (buf == NULL)
7218                         return -ENOMEM;
7219                 if (b_offset)
7220                         memcpy(buf, &start, 4);
7221                 if (odd_len)
7222                         memcpy(buf+len-4, &end, 4);
7223                 memcpy(buf + b_offset, data, eeprom->len);
7224         }
7225
7226         ret = tg3_nvram_write_block(tp, offset, len, buf);
7227
7228         if (buf != data)
7229                 kfree(buf);
7230
7231         return ret;
7232 }
7233
7234 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7235 {
7236         struct tg3 *tp = netdev_priv(dev);
7237   
7238         cmd->supported = (SUPPORTED_Autoneg);
7239
7240         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7241                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7242                                    SUPPORTED_1000baseT_Full);
7243
7244         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7245                 cmd->supported |= (SUPPORTED_100baseT_Half |
7246                                   SUPPORTED_100baseT_Full |
7247                                   SUPPORTED_10baseT_Half |
7248                                   SUPPORTED_10baseT_Full |
7249                                   SUPPORTED_MII);
7250         else
7251                 cmd->supported |= SUPPORTED_FIBRE;
7252   
7253         cmd->advertising = tp->link_config.advertising;
7254         if (netif_running(dev)) {
7255                 cmd->speed = tp->link_config.active_speed;
7256                 cmd->duplex = tp->link_config.active_duplex;
7257         }
7258         cmd->port = 0;
7259         cmd->phy_address = PHY_ADDR;
7260         cmd->transceiver = 0;
7261         cmd->autoneg = tp->link_config.autoneg;
7262         cmd->maxtxpkt = 0;
7263         cmd->maxrxpkt = 0;
7264         return 0;
7265 }
7266   
7267 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7268 {
7269         struct tg3 *tp = netdev_priv(dev);
7270   
7271         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7272                 /* These are the only valid advertisement bits allowed.  */
7273                 if (cmd->autoneg == AUTONEG_ENABLE &&
7274                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7275                                           ADVERTISED_1000baseT_Full |
7276                                           ADVERTISED_Autoneg |
7277                                           ADVERTISED_FIBRE)))
7278                         return -EINVAL;
7279         }
7280
7281         tg3_full_lock(tp, 0);
7282
7283         tp->link_config.autoneg = cmd->autoneg;
7284         if (cmd->autoneg == AUTONEG_ENABLE) {
7285                 tp->link_config.advertising = cmd->advertising;
7286                 tp->link_config.speed = SPEED_INVALID;
7287                 tp->link_config.duplex = DUPLEX_INVALID;
7288         } else {
7289                 tp->link_config.advertising = 0;
7290                 tp->link_config.speed = cmd->speed;
7291                 tp->link_config.duplex = cmd->duplex;
7292         }
7293   
7294         if (netif_running(dev))
7295                 tg3_setup_phy(tp, 1);
7296
7297         tg3_full_unlock(tp);
7298   
7299         return 0;
7300 }
7301   
7302 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7303 {
7304         struct tg3 *tp = netdev_priv(dev);
7305   
7306         strcpy(info->driver, DRV_MODULE_NAME);
7307         strcpy(info->version, DRV_MODULE_VERSION);
7308         strcpy(info->bus_info, pci_name(tp->pdev));
7309 }
7310   
7311 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7312 {
7313         struct tg3 *tp = netdev_priv(dev);
7314   
7315         wol->supported = WAKE_MAGIC;
7316         wol->wolopts = 0;
7317         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7318                 wol->wolopts = WAKE_MAGIC;
7319         memset(&wol->sopass, 0, sizeof(wol->sopass));
7320 }
7321   
7322 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7323 {
7324         struct tg3 *tp = netdev_priv(dev);
7325   
7326         if (wol->wolopts & ~WAKE_MAGIC)
7327                 return -EINVAL;
7328         if ((wol->wolopts & WAKE_MAGIC) &&
7329             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7330             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7331                 return -EINVAL;
7332   
7333         spin_lock_bh(&tp->lock);
7334         if (wol->wolopts & WAKE_MAGIC)
7335                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7336         else
7337                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7338         spin_unlock_bh(&tp->lock);
7339   
7340         return 0;
7341 }
7342   
7343 static u32 tg3_get_msglevel(struct net_device *dev)
7344 {
7345         struct tg3 *tp = netdev_priv(dev);
7346         return tp->msg_enable;
7347 }
7348   
7349 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7350 {
7351         struct tg3 *tp = netdev_priv(dev);
7352         tp->msg_enable = value;
7353 }
7354   
7355 #if TG3_TSO_SUPPORT != 0
7356 static int tg3_set_tso(struct net_device *dev, u32 value)
7357 {
7358         struct tg3 *tp = netdev_priv(dev);
7359
7360         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7361                 if (value)
7362                         return -EINVAL;
7363                 return 0;
7364         }
7365         return ethtool_op_set_tso(dev, value);
7366 }
7367 #endif
7368   
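     /* ethtool autonegotiation restart.  Not supported on PHY_SERDES
      * (fiber) parts; for copper the BMCR is read (the first read below
      * appears to be a dummy to flush a stale value) and autonegotiation is
      * restarted only if it is already enabled or parallel detection is in
      * progress.
      */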
7369 static int tg3_nway_reset(struct net_device *dev)
7370 {
7371         struct tg3 *tp = netdev_priv(dev);
7372         u32 bmcr;
7373         int r;
7374   
7375         if (!netif_running(dev))
7376                 return -EAGAIN;
7377
7378         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7379                 return -EINVAL;
7380
7381         spin_lock_bh(&tp->lock);
7382         r = -EINVAL;
7383         tg3_readphy(tp, MII_BMCR, &bmcr);
7384         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7385             ((bmcr & BMCR_ANENABLE) ||
7386              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7387                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7388                                            BMCR_ANENABLE);
7389                 r = 0;
7390         }
7391         spin_unlock_bh(&tp->lock);
7392   
7393         return r;
7394 }
7395   
7396 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7397 {
7398         struct tg3 *tp = netdev_priv(dev);
7399   
7400         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7401         ering->rx_mini_max_pending = 0;
7402         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7403
7404         ering->rx_pending = tp->rx_pending;
7405         ering->rx_mini_pending = 0;
7406         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7407         ering->tx_pending = tp->tx_pending;
7408 }
7409   
7410 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7411 {
7412         struct tg3 *tp = netdev_priv(dev);
7413         int irq_sync = 0;
7414   
7415         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7416             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7417             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7418                 return -EINVAL;
7419   
7420         if (netif_running(dev)) {
7421                 tg3_netif_stop(tp);
7422                 irq_sync = 1;
7423         }
7424
7425         tg3_full_lock(tp, irq_sync);
7426   
7427         tp->rx_pending = ering->rx_pending;
7428
7429         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7430             tp->rx_pending > 63)
7431                 tp->rx_pending = 63;
7432         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7433         tp->tx_pending = ering->tx_pending;
7434
7435         if (netif_running(dev)) {
7436                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7437                 tg3_init_hw(tp);
7438                 tg3_netif_start(tp);
7439         }
7440
7441         tg3_full_unlock(tp);
7442   
7443         return 0;
7444 }
7445   
7446 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7447 {
7448         struct tg3 *tp = netdev_priv(dev);
7449   
7450         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7451         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7452         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7453 }
7454   
7455 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7456 {
7457         struct tg3 *tp = netdev_priv(dev);
7458         int irq_sync = 0;
7459   
7460         if (netif_running(dev)) {
7461                 tg3_netif_stop(tp);
7462                 irq_sync = 1;
7463         }
7464
7465         tg3_full_lock(tp, irq_sync);
7466
7467         if (epause->autoneg)
7468                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7469         else
7470                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7471         if (epause->rx_pause)
7472                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7473         else
7474                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7475         if (epause->tx_pause)
7476                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7477         else
7478                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7479
7480         if (netif_running(dev)) {
7481                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7482                 tg3_init_hw(tp);
7483                 tg3_netif_start(tp);
7484         }
7485
7486         tg3_full_unlock(tp);
7487   
7488         return 0;
7489 }
7490   
7491 static u32 tg3_get_rx_csum(struct net_device *dev)
7492 {
7493         struct tg3 *tp = netdev_priv(dev);
7494         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7495 }
7496   
7497 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7498 {
7499         struct tg3 *tp = netdev_priv(dev);
7500   
7501         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7502                 if (data != 0)
7503                         return -EINVAL;
7504                 return 0;
7505         }
7506   
7507         spin_lock_bh(&tp->lock);
7508         if (data)
7509                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7510         else
7511                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7512         spin_unlock_bh(&tp->lock);
7513   
7514         return 0;
7515 }
7516   
7517 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7518 {
7519         struct tg3 *tp = netdev_priv(dev);
7520   
7521         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7522                 if (data != 0)
7523                         return -EINVAL;
7524                 return 0;
7525         }
7526   
7527         if (data)
7528                 dev->features |= NETIF_F_IP_CSUM;
7529         else
7530                 dev->features &= ~NETIF_F_IP_CSUM;
7531
7532         return 0;
7533 }
7534
7535 static int tg3_get_stats_count (struct net_device *dev)
7536 {
7537         return TG3_NUM_STATS;
7538 }
7539
7540 static int tg3_get_test_count (struct net_device *dev)
7541 {
7542         return TG3_NUM_TEST;
7543 }
7544
7545 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7546 {
7547         switch (stringset) {
7548         case ETH_SS_STATS:
7549                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7550                 break;
7551         case ETH_SS_TEST:
7552                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7553                 break;
7554         default:
7555                 WARN_ON(1);     /* we need a WARN() */
7556                 break;
7557         }
7558 }
7559
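     /* ethtool LED identify: blink for 'data' seconds (default 2) by
      * toggling MAC_LED_CTRL between "all link/traffic LEDs forced on" and
      * "all forced off" every 500ms, then restore the saved tp->led_ctrl.
      */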
7560 static int tg3_phys_id(struct net_device *dev, u32 data)
7561 {
7562         struct tg3 *tp = netdev_priv(dev);
7563         int i;
7564
7565         if (!netif_running(tp->dev))
7566                 return -EAGAIN;
7567
7568         if (data == 0)
7569                 data = 2;
7570
7571         for (i = 0; i < (data * 2); i++) {
7572                 if ((i % 2) == 0)
7573                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7574                                            LED_CTRL_1000MBPS_ON |
7575                                            LED_CTRL_100MBPS_ON |
7576                                            LED_CTRL_10MBPS_ON |
7577                                            LED_CTRL_TRAFFIC_OVERRIDE |
7578                                            LED_CTRL_TRAFFIC_BLINK |
7579                                            LED_CTRL_TRAFFIC_LED);
7580         
7581                 else
7582                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7583                                            LED_CTRL_TRAFFIC_OVERRIDE);
7584
7585                 if (msleep_interruptible(500))
7586                         break;
7587         }
7588         tw32(MAC_LED_CTRL, tp->led_ctrl);
7589         return 0;
7590 }
7591
7592 static void tg3_get_ethtool_stats (struct net_device *dev,
7593                                    struct ethtool_stats *estats, u64 *tmp_stats)
7594 {
7595         struct tg3 *tp = netdev_priv(dev);
7596         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7597 }
7598
7599 #define NVRAM_TEST_SIZE 0x100
7600
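     /* Self-test: read the first 256 bytes of NVRAM and verify the magic
      * word plus two CRC-32 checksums: one over bytes 0x00-0x0f stored at
      * offset 0x10, and one over the 0x88-byte manufacturing block at 0x74
      * stored at offset 0xfc.
      */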
7601 static int tg3_test_nvram(struct tg3 *tp)
7602 {
7603         u32 *buf, csum;
7604         int i, j, err = 0;
7605
7606         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7607         if (buf == NULL)
7608                 return -ENOMEM;
7609
7610         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7611                 u32 val;
7612
7613                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7614                         break;
7615                 buf[j] = cpu_to_le32(val);
7616         }
7617         if (i < NVRAM_TEST_SIZE)
7618                 goto out;
7619
7620         err = -EIO;
7621         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7622                 goto out;
7623
7624         /* Bootstrap checksum at offset 0x10 */
7625         csum = calc_crc((unsigned char *) buf, 0x10);
7626         if (csum != cpu_to_le32(buf[0x10/4]))
7627                 goto out;
7628
7629         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7630         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7631         if (csum != cpu_to_le32(buf[0xfc/4]))
7632                 goto out;
7633
7634         err = 0;
7635
7636 out:
7637         kfree(buf);
7638         return err;
7639 }
7640
7641 #define TG3_SERDES_TIMEOUT_SEC  2
7642 #define TG3_COPPER_TIMEOUT_SEC  6
7643
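     /* Self-test: wait up to 2 seconds (SERDES) or 6 seconds (copper) for
      * the link to come up, polling netif_carrier_ok() once per second.
      */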
7644 static int tg3_test_link(struct tg3 *tp)
7645 {
7646         int i, max;
7647
7648         if (!netif_running(tp->dev))
7649                 return -ENODEV;
7650
7651         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7652                 max = TG3_SERDES_TIMEOUT_SEC;
7653         else
7654                 max = TG3_COPPER_TIMEOUT_SEC;
7655
7656         for (i = 0; i < max; i++) {
7657                 if (netif_carrier_ok(tp->dev))
7658                         return 0;
7659
7660                 if (msleep_interruptible(1000))
7661                         break;
7662         }
7663
7664         return -EIO;
7665 }
7666
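     /* Register self-test.  Each reg_tbl[] entry gives a register offset,
      * flags restricting the entry to 5705-class or non-5705 chips (and
      * excluding the 5788 where noted), plus the read_mask/write_mask the
      * test loop applies; the table ends with an offset of 0xffff.
      */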
7667 /* Only test the commonly used registers */
7668 static int tg3_test_registers(struct tg3 *tp)
7669 {
7670         int i, is_5705;
7671         u32 offset, read_mask, write_mask, val, save_val, read_val;
7672         static struct {
7673                 u16 offset;
7674                 u16 flags;
7675 #define TG3_FL_5705     0x1
7676 #define TG3_FL_NOT_5705 0x2
7677 #define TG3_FL_NOT_5788 0x4
7678                 u32 read_mask;
7679                 u32 write_mask;
7680         } reg_tbl[] = {
7681                 /* MAC Control Registers */
7682                 { MAC_MODE, TG3_FL_NOT_5705,
7683                         0x00000000, 0x00ef6f8c },
7684                 { MAC_MODE, TG3_FL_5705,
7685                         0x00000000, 0x01ef6b8c },
7686                 { MAC_STATUS, TG3_FL_NOT_5705,
7687                         0x03800107, 0x00000000 },
7688                 { MAC_STATUS, TG3_FL_5705,
7689                         0x03800100, 0x00000000 },
7690                 { MAC_ADDR_0_HIGH, 0x0000,
7691                         0x00000000, 0x0000ffff },
7692                 { MAC_ADDR_0_LOW, 0x0000,
7693                         0x00000000, 0xffffffff },
7694                 { MAC_RX_MTU_SIZE, 0x0000,
7695                         0x00000000, 0x0000ffff },
7696                 { MAC_TX_MODE, 0x0000,
7697                         0x00000000, 0x00000070 },
7698                 { MAC_TX_LENGTHS, 0x0000,
7699                         0x00000000, 0x00003fff },
7700                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7701                         0x00000000, 0x000007fc },
7702                 { MAC_RX_MODE, TG3_FL_5705,
7703                         0x00000000, 0x000007dc },
7704                 { MAC_HASH_REG_0, 0x0000,
7705                         0x00000000, 0xffffffff },
7706                 { MAC_HASH_REG_1, 0x0000,
7707                         0x00000000, 0xffffffff },
7708                 { MAC_HASH_REG_2, 0x0000,
7709                         0x00000000, 0xffffffff },
7710                 { MAC_HASH_REG_3, 0x0000,
7711                         0x00000000, 0xffffffff },
7712
7713                 /* Receive Data and Receive BD Initiator Control Registers. */
7714                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7715                         0x00000000, 0xffffffff },
7716                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7717                         0x00000000, 0xffffffff },
7718                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7719                         0x00000000, 0x00000003 },
7720                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7721                         0x00000000, 0xffffffff },
7722                 { RCVDBDI_STD_BD+0, 0x0000,
7723                         0x00000000, 0xffffffff },
7724                 { RCVDBDI_STD_BD+4, 0x0000,
7725                         0x00000000, 0xffffffff },
7726                 { RCVDBDI_STD_BD+8, 0x0000,
7727                         0x00000000, 0xffff0002 },
7728                 { RCVDBDI_STD_BD+0xc, 0x0000,
7729                         0x00000000, 0xffffffff },
7730         
7731                 /* Receive BD Initiator Control Registers. */
7732                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7733                         0x00000000, 0xffffffff },
7734                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7735                         0x00000000, 0x000003ff },
7736                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7737                         0x00000000, 0xffffffff },
7738         
7739                 /* Host Coalescing Control Registers. */
7740                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7741                         0x00000000, 0x00000004 },
7742                 { HOSTCC_MODE, TG3_FL_5705,
7743                         0x00000000, 0x000000f6 },
7744                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7745                         0x00000000, 0xffffffff },
7746                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7747                         0x00000000, 0x000003ff },
7748                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7749                         0x00000000, 0xffffffff },
7750                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7751                         0x00000000, 0x000003ff },
7752                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7753                         0x00000000, 0xffffffff },
7754                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7755                         0x00000000, 0x000000ff },
7756                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7757                         0x00000000, 0xffffffff },
7758                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7759                         0x00000000, 0x000000ff },
7760                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7761                         0x00000000, 0xffffffff },
7762                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7763                         0x00000000, 0xffffffff },
7764                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7765                         0x00000000, 0xffffffff },
7766                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7767                         0x00000000, 0x000000ff },
7768                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7769                         0x00000000, 0xffffffff },
7770                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7771                         0x00000000, 0x000000ff },
7772                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7773                         0x00000000, 0xffffffff },
7774                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7775                         0x00000000, 0xffffffff },
7776                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7777                         0x00000000, 0xffffffff },
7778                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7779                         0x00000000, 0xffffffff },
7780                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7781                         0x00000000, 0xffffffff },
7782                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7783                         0xffffffff, 0x00000000 },
7784                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7785                         0xffffffff, 0x00000000 },
7786
7787                 /* Buffer Manager Control Registers. */
7788                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7789                         0x00000000, 0x007fff80 },
7790                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7791                         0x00000000, 0x007fffff },
7792                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7793                         0x00000000, 0x0000003f },
7794                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7795                         0x00000000, 0x000001ff },
7796                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7797                         0x00000000, 0x000001ff },
7798                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7799                         0xffffffff, 0x00000000 },
7800                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7801                         0xffffffff, 0x00000000 },
7802         
7803                 /* Mailbox Registers */
7804                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7805                         0x00000000, 0x000001ff },
7806                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7807                         0x00000000, 0x000001ff },
7808                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7809                         0x00000000, 0x000007ff },
7810                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7811                         0x00000000, 0x000001ff },
7812
7813                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7814         };
7815
7816         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7817                 is_5705 = 1;
7818         else
7819                 is_5705 = 0;
7820
7821         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7822                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7823                         continue;
7824
7825                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7826                         continue;
7827
7828                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7829                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7830                         continue;
7831
7832                 offset = (u32) reg_tbl[i].offset;
7833                 read_mask = reg_tbl[i].read_mask;
7834                 write_mask = reg_tbl[i].write_mask;
7835
7836                 /* Save the original register content */
7837                 save_val = tr32(offset);
7838
7839                 /* Determine the read-only value. */
7840                 read_val = save_val & read_mask;
7841
7842                 /* Write zero to the register, then make sure the read-only bits
7843                  * are not changed and the read/write bits are all zeros.
7844                  */
7845                 tw32(offset, 0);
7846
7847                 val = tr32(offset);
7848
7849                 /* Test the read-only and read/write bits. */
7850                 if (((val & read_mask) != read_val) || (val & write_mask))
7851                         goto out;
7852
7853                 /* Write ones to all the bits defined by RdMask and WrMask, then
7854                  * make sure the read-only bits are not changed and the
7855                  * read/write bits are all ones.
7856                  */
7857                 tw32(offset, read_mask | write_mask);
7858
7859                 val = tr32(offset);
7860
7861                 /* Test the read-only bits. */
7862                 if ((val & read_mask) != read_val)
7863                         goto out;
7864
7865                 /* Test the read/write bits. */
7866                 if ((val & write_mask) != write_mask)
7867                         goto out;
7868
7869                 tw32(offset, save_val);
7870         }
7871
7872         return 0;
7873
7874 out:
7875         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7876         tw32(offset, save_val);
7877         return -EIO;
7878 }
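
/* For reference, a condensed sketch of the per-register mask check that the
 * loop above performs, reusing the tr32()/tw32() accessors used throughout
 * this driver.  The helper name is illustrative only and is not part of the
 * driver.
 */
static int tg3_check_reg_masks(struct tg3 *tp, u32 off,
                               u32 read_mask, u32 write_mask)
{
        u32 save, ro, val;
        int bad = 0;

        save = tr32(off);
        ro = save & read_mask;             /* value the read-only bits must keep */

        tw32(off, 0);                      /* writable bits must read back as 0 */
        val = tr32(off);
        if ((val & read_mask) != ro || (val & write_mask))
                bad = 1;

        tw32(off, read_mask | write_mask); /* writable bits must read back as 1 */
        val = tr32(off);
        if ((val & read_mask) != ro || (val & write_mask) != write_mask)
                bad = 1;

        tw32(off, save);                   /* always restore the original value */
        return bad ? -EIO : 0;
}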
7879
7880 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7881 {
7882         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7883         int i;
7884         u32 j;
7885
7886         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7887                 for (j = 0; j < len; j += 4) {
7888                         u32 val;
7889
7890                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7891                         tg3_read_mem(tp, offset + j, &val);
7892                         if (val != test_pattern[i])
7893                                 return -EIO;
7894                 }
7895         }
7896         return 0;
7897 }
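
/* Note on the pattern test above: each of the three patterns (all zeros,
 * all ones, 0xaa55a55a) is written and immediately read back at every
 * 32-bit word in the window; any mismatch fails the test.
 */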
7898
7899 static int tg3_test_memory(struct tg3 *tp)
7900 {
7901         static struct mem_entry {
7902                 u32 offset;
7903                 u32 len;
7904         } mem_tbl_570x[] = {
7905                 { 0x00000000, 0x01000},
7906                 { 0x00002000, 0x1c000},
7907                 { 0xffffffff, 0x00000}
7908         }, mem_tbl_5705[] = {
7909                 { 0x00000100, 0x0000c},
7910                 { 0x00000200, 0x00008},
7911                 { 0x00000b50, 0x00400},
7912                 { 0x00004000, 0x00800},
7913                 { 0x00006000, 0x01000},
7914                 { 0x00008000, 0x02000},
7915                 { 0x00010000, 0x0e000},
7916                 { 0xffffffff, 0x00000}
7917         };
7918         struct mem_entry *mem_tbl;
7919         int err = 0;
7920         int i;
7921
7922         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7923                 mem_tbl = mem_tbl_5705;
7924         else
7925                 mem_tbl = mem_tbl_570x;
7926
7927         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7928                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7929                     mem_tbl[i].len)) != 0)
7930                         break;
7931         }
7932         
7933         return err;
7934 }
7935
7936 #define TG3_MAC_LOOPBACK        0
7937 #define TG3_PHY_LOOPBACK        1
7938
7939 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
7940 {
7941         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
7942         u32 desc_idx;
7943         struct sk_buff *skb, *rx_skb;
7944         u8 *tx_data;
7945         dma_addr_t map;
7946         int num_pkts, tx_len, rx_len, i, err;
7947         struct tg3_rx_buffer_desc *desc;
7948
7949         if (loopback_mode == TG3_MAC_LOOPBACK) {
7950                 /* HW errata - MAC loopback fails in some cases on 5780.
7951                  * Normal traffic and PHY loopback are not affected by
7952                  * this erratum.
7953                  */
7954                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
7955                         return 0;
7956
7957                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7958                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7959                            MAC_MODE_PORT_MODE_GMII;
7960                 tw32(MAC_MODE, mac_mode);
7961         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
7962                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7963                                            BMCR_SPEED1000);
7964                 udelay(40);
7965                 /* reset to prevent losing 1st rx packet intermittently */
7966                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7967                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7968                         udelay(10);
7969                         tw32_f(MAC_RX_MODE, tp->rx_mode);
7970                 }
7971                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7972                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
7973                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
7974                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7975                 tw32(MAC_MODE, mac_mode);
7976         }
7977         else
7978                 return -EINVAL;
7979
7980         err = -EIO;
7981
7982         tx_len = 1514;
7983         skb = dev_alloc_skb(tx_len);
7984         tx_data = skb_put(skb, tx_len);
7985         memcpy(tx_data, tp->dev->dev_addr, 6);
7986         memset(tx_data + 6, 0x0, 8);
7987
7988         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7989
7990         for (i = 14; i < tx_len; i++)
7991                 tx_data[i] = (u8) (i & 0xff);
7992
7993         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7994
7995         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7996              HOSTCC_MODE_NOW);
7997
7998         udelay(10);
7999
8000         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8001
8002         num_pkts = 0;
8003
8004         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8005
8006         tp->tx_prod++;
8007         num_pkts++;
8008
8009         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8010                      tp->tx_prod);
8011         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8012
8013         udelay(10);
8014
8015         for (i = 0; i < 10; i++) {
8016                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8017                        HOSTCC_MODE_NOW);
8018
8019                 udelay(10);
8020
8021                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8022                 rx_idx = tp->hw_status->idx[0].rx_producer;
8023                 if ((tx_idx == tp->tx_prod) &&
8024                     (rx_idx == (rx_start_idx + num_pkts)))
8025                         break;
8026         }
8027
8028         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8029         dev_kfree_skb(skb);
8030
8031         if (tx_idx != tp->tx_prod)
8032                 goto out;
8033
8034         if (rx_idx != rx_start_idx + num_pkts)
8035                 goto out;
8036
8037         desc = &tp->rx_rcb[rx_start_idx];
8038         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8039         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8040         if (opaque_key != RXD_OPAQUE_RING_STD)
8041                 goto out;
8042
8043         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8044             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8045                 goto out;
8046
8047         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8048         if (rx_len != tx_len)
8049                 goto out;
8050
8051         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8052
8053         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8054         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8055
8056         for (i = 14; i < tx_len; i++) {
8057                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8058                         goto out;
8059         }
8060         err = 0;
8061         
8062         /* tg3_free_rings will unmap and free the rx_skb */
8063 out:
8064         return err;
8065 }
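
/* The loopback test above fills the frame body with a position-dependent
 * byte pattern and checks it on receive, so corruption anywhere in the
 * MAC/PHY path shows up as a mismatch.  A minimal sketch of that pattern,
 * with illustrative helper names:
 */
static void tg3_loopback_fill(u8 *buf, int len)
{
        int i;

        /* bytes 0..13 hold the Ethernet header built by the caller */
        for (i = 14; i < len; i++)
                buf[i] = (u8) (i & 0xff);
}

static int tg3_loopback_verify(const u8 *buf, int len)
{
        int i;

        for (i = 14; i < len; i++)
                if (buf[i] != (u8) (i & 0xff))
                        return -EIO;
        return 0;
}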
8066
8067 #define TG3_MAC_LOOPBACK_FAILED         1
8068 #define TG3_PHY_LOOPBACK_FAILED         2
8069 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8070                                          TG3_PHY_LOOPBACK_FAILED)
8071
8072 static int tg3_test_loopback(struct tg3 *tp)
8073 {
8074         int err = 0;
8075
8076         if (!netif_running(tp->dev))
8077                 return TG3_LOOPBACK_FAILED;
8078
8079         tg3_reset_hw(tp);
8080
8081         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8082                 err |= TG3_MAC_LOOPBACK_FAILED;
8083         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8084                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8085                         err |= TG3_PHY_LOOPBACK_FAILED;
8086         }
8087
8088         return err;
8089 }
8090
8091 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8092                           u64 *data)
8093 {
8094         struct tg3 *tp = netdev_priv(dev);
8095
8096         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8097
8098         if (tg3_test_nvram(tp) != 0) {
8099                 etest->flags |= ETH_TEST_FL_FAILED;
8100                 data[0] = 1;
8101         }
8102         if (tg3_test_link(tp) != 0) {
8103                 etest->flags |= ETH_TEST_FL_FAILED;
8104                 data[1] = 1;
8105         }
8106         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8107                 int irq_sync = 0;
8108
8109                 if (netif_running(dev)) {
8110                         tg3_netif_stop(tp);
8111                         irq_sync = 1;
8112                 }
8113
8114                 tg3_full_lock(tp, irq_sync);
8115
8116                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8117                 tg3_nvram_lock(tp);
8118                 tg3_halt_cpu(tp, RX_CPU_BASE);
8119                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8120                         tg3_halt_cpu(tp, TX_CPU_BASE);
8121                 tg3_nvram_unlock(tp);
8122
8123                 if (tg3_test_registers(tp) != 0) {
8124                         etest->flags |= ETH_TEST_FL_FAILED;
8125                         data[2] = 1;
8126                 }
8127                 if (tg3_test_memory(tp) != 0) {
8128                         etest->flags |= ETH_TEST_FL_FAILED;
8129                         data[3] = 1;
8130                 }
8131                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8132                         etest->flags |= ETH_TEST_FL_FAILED;
8133
8134                 tg3_full_unlock(tp);
8135
8136                 if (tg3_test_interrupt(tp) != 0) {
8137                         etest->flags |= ETH_TEST_FL_FAILED;
8138                         data[5] = 1;
8139                 }
8140
8141                 tg3_full_lock(tp, 0);
8142
8143                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8144                 if (netif_running(dev)) {
8145                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8146                         tg3_init_hw(tp);
8147                         tg3_netif_start(tp);
8148                 }
8149
8150                 tg3_full_unlock(tp);
8151         }
8152 }
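
/* Result slots filled in by tg3_self_test() above, as reported by
 * "ethtool -t".  The enum is illustrative only; the driver indexes
 * data[] with bare constants.
 */
enum {
        TG3_SELFTEST_NVRAM      = 0,    /* data[0]: NVRAM test                      */
        TG3_SELFTEST_LINK       = 1,    /* data[1]: link test                       */
        TG3_SELFTEST_REGISTERS  = 2,    /* data[2]: register test (offline only)    */
        TG3_SELFTEST_MEMORY     = 3,    /* data[3]: memory test (offline only)      */
        TG3_SELFTEST_LOOPBACK   = 4,    /* data[4]: loopback bitmask (offline only) */
        TG3_SELFTEST_INTERRUPT  = 5,    /* data[5]: interrupt test (offline only)   */
};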
8153
8154 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8155 {
8156         struct mii_ioctl_data *data = if_mii(ifr);
8157         struct tg3 *tp = netdev_priv(dev);
8158         int err;
8159
8160         switch(cmd) {
8161         case SIOCGMIIPHY:
8162                 data->phy_id = PHY_ADDR;
8163
8164                 /* fallthru */
8165         case SIOCGMIIREG: {
8166                 u32 mii_regval;
8167
8168                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8169                         break;                  /* We have no PHY */
8170
8171                 spin_lock_bh(&tp->lock);
8172                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8173                 spin_unlock_bh(&tp->lock);
8174
8175                 data->val_out = mii_regval;
8176
8177                 return err;
8178         }
8179
8180         case SIOCSMIIREG:
8181                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8182                         break;                  /* We have no PHY */
8183
8184                 if (!capable(CAP_NET_ADMIN))
8185                         return -EPERM;
8186
8187                 spin_lock_bh(&tp->lock);
8188                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8189                 spin_unlock_bh(&tp->lock);
8190
8191                 return err;
8192
8193         default:
8194                 /* do nothing */
8195                 break;
8196         }
8197         return -EOPNOTSUPP;
8198 }
8199
8200 #if TG3_VLAN_TAG_USED
8201 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8202 {
8203         struct tg3 *tp = netdev_priv(dev);
8204
8205         tg3_full_lock(tp, 0);
8206
8207         tp->vlgrp = grp;
8208
8209         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8210         __tg3_set_rx_mode(dev);
8211
8212         tg3_full_unlock(tp);
8213 }
8214
8215 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8216 {
8217         struct tg3 *tp = netdev_priv(dev);
8218
8219         tg3_full_lock(tp, 0);
8220         if (tp->vlgrp)
8221                 tp->vlgrp->vlan_devices[vid] = NULL;
8222         tg3_full_unlock(tp);
8223 }
8224 #endif
8225
8226 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8227 {
8228         struct tg3 *tp = netdev_priv(dev);
8229
8230         memcpy(ec, &tp->coal, sizeof(*ec));
8231         return 0;
8232 }
8233
8234 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8235 {
8236         struct tg3 *tp = netdev_priv(dev);
8237         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8238         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8239
8240         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8241                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8242                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8243                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8244                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8245         }
8246
8247         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8248             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8249             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8250             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8251             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8252             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8253             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8254             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8255             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8256             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8257                 return -EINVAL;
8258
8259         /* No rx interrupts will be generated if both are zero */
8260         if ((ec->rx_coalesce_usecs == 0) &&
8261             (ec->rx_max_coalesced_frames == 0))
8262                 return -EINVAL;
8263
8264         /* No tx interrupts will be generated if both are zero */
8265         if ((ec->tx_coalesce_usecs == 0) &&
8266             (ec->tx_max_coalesced_frames == 0))
8267                 return -EINVAL;
8268
8269         /* Only copy relevant parameters, ignore all others. */
8270         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8271         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8272         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8273         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8274         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8275         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8276         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8277         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8278         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8279
8280         if (netif_running(dev)) {
8281                 tg3_full_lock(tp, 0);
8282                 __tg3_set_coalesce(tp, &tp->coal);
8283                 tg3_full_unlock(tp);
8284         }
8285         return 0;
8286 }
8287
8288 static struct ethtool_ops tg3_ethtool_ops = {
8289         .get_settings           = tg3_get_settings,
8290         .set_settings           = tg3_set_settings,
8291         .get_drvinfo            = tg3_get_drvinfo,
8292         .get_regs_len           = tg3_get_regs_len,
8293         .get_regs               = tg3_get_regs,
8294         .get_wol                = tg3_get_wol,
8295         .set_wol                = tg3_set_wol,
8296         .get_msglevel           = tg3_get_msglevel,
8297         .set_msglevel           = tg3_set_msglevel,
8298         .nway_reset             = tg3_nway_reset,
8299         .get_link               = ethtool_op_get_link,
8300         .get_eeprom_len         = tg3_get_eeprom_len,
8301         .get_eeprom             = tg3_get_eeprom,
8302         .set_eeprom             = tg3_set_eeprom,
8303         .get_ringparam          = tg3_get_ringparam,
8304         .set_ringparam          = tg3_set_ringparam,
8305         .get_pauseparam         = tg3_get_pauseparam,
8306         .set_pauseparam         = tg3_set_pauseparam,
8307         .get_rx_csum            = tg3_get_rx_csum,
8308         .set_rx_csum            = tg3_set_rx_csum,
8309         .get_tx_csum            = ethtool_op_get_tx_csum,
8310         .set_tx_csum            = tg3_set_tx_csum,
8311         .get_sg                 = ethtool_op_get_sg,
8312         .set_sg                 = ethtool_op_set_sg,
8313 #if TG3_TSO_SUPPORT != 0
8314         .get_tso                = ethtool_op_get_tso,
8315         .set_tso                = tg3_set_tso,
8316 #endif
8317         .self_test_count        = tg3_get_test_count,
8318         .self_test              = tg3_self_test,
8319         .get_strings            = tg3_get_strings,
8320         .phys_id                = tg3_phys_id,
8321         .get_stats_count        = tg3_get_stats_count,
8322         .get_ethtool_stats      = tg3_get_ethtool_stats,
8323         .get_coalesce           = tg3_get_coalesce,
8324         .set_coalesce           = tg3_set_coalesce,
8325         .get_perm_addr          = ethtool_op_get_perm_addr,
8326 };
8327
8328 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8329 {
8330         u32 cursize, val;
8331
8332         tp->nvram_size = EEPROM_CHIP_SIZE;
8333
8334         if (tg3_nvram_read(tp, 0, &val) != 0)
8335                 return;
8336
8337         if (swab32(val) != TG3_EEPROM_MAGIC)
8338                 return;
8339
8340         /*
8341          * Size the chip by reading offsets at increasing powers of two.
8342          * When we encounter our validation signature, we know the addressing
8343          * has wrapped around, and thus have our chip size.
8344          */
8345         cursize = 0x800;
8346
8347         while (cursize < tp->nvram_size) {
8348                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8349                         return;
8350
8351                 if (swab32(val) == TG3_EEPROM_MAGIC)
8352                         break;
8353
8354                 cursize <<= 1;
8355         }
8356
8357         tp->nvram_size = cursize;
8358 }
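
/* Worked example of the wrap-around sizing above, assuming the data at the
 * probed offsets does not itself equal the magic: on a 2 KB (0x800) EEPROM
 * the read at offset 0x800 aliases offset 0 and returns the magic straight
 * away, so cursize stays 0x800; an 8 KB part returns ordinary data at 0x800
 * and 0x1000 and only aliases the magic at 0x2000.
 */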
8359                 
8360 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8361 {
8362         u32 val;
8363
8364         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8365                 if (val != 0) {
8366                         tp->nvram_size = (val >> 16) * 1024;
8367                         return;
8368                 }
8369         }
8370         tp->nvram_size = 0x20000;
8371 }
8372
8373 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8374 {
8375         u32 nvcfg1;
8376
8377         nvcfg1 = tr32(NVRAM_CFG1);
8378         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8379                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8380         }
8381         else {
8382                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8383                 tw32(NVRAM_CFG1, nvcfg1);
8384         }
8385
8386         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8387             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8388                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8389                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8390                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8391                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8392                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8393                                 break;
8394                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8395                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8396                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8397                                 break;
8398                         case FLASH_VENDOR_ATMEL_EEPROM:
8399                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8400                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8401                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8402                                 break;
8403                         case FLASH_VENDOR_ST:
8404                                 tp->nvram_jedecnum = JEDEC_ST;
8405                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8406                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8407                                 break;
8408                         case FLASH_VENDOR_SAIFUN:
8409                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8410                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8411                                 break;
8412                         case FLASH_VENDOR_SST_SMALL:
8413                         case FLASH_VENDOR_SST_LARGE:
8414                                 tp->nvram_jedecnum = JEDEC_SST;
8415                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8416                                 break;
8417                 }
8418         }
8419         else {
8420                 tp->nvram_jedecnum = JEDEC_ATMEL;
8421                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8422                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8423         }
8424 }
8425
8426 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8427 {
8428         u32 nvcfg1;
8429
8430         nvcfg1 = tr32(NVRAM_CFG1);
8431
8432         /* NVRAM protection for TPM */
8433         if (nvcfg1 & (1 << 27))
8434                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8435
8436         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8437                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8438                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8439                         tp->nvram_jedecnum = JEDEC_ATMEL;
8440                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8441                         break;
8442                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8443                         tp->nvram_jedecnum = JEDEC_ATMEL;
8444                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8445                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8446                         break;
8447                 case FLASH_5752VENDOR_ST_M45PE10:
8448                 case FLASH_5752VENDOR_ST_M45PE20:
8449                 case FLASH_5752VENDOR_ST_M45PE40:
8450                         tp->nvram_jedecnum = JEDEC_ST;
8451                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8452                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8453                         break;
8454         }
8455
8456         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8457                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8458                         case FLASH_5752PAGE_SIZE_256:
8459                                 tp->nvram_pagesize = 256;
8460                                 break;
8461                         case FLASH_5752PAGE_SIZE_512:
8462                                 tp->nvram_pagesize = 512;
8463                                 break;
8464                         case FLASH_5752PAGE_SIZE_1K:
8465                                 tp->nvram_pagesize = 1024;
8466                                 break;
8467                         case FLASH_5752PAGE_SIZE_2K:
8468                                 tp->nvram_pagesize = 2048;
8469                                 break;
8470                         case FLASH_5752PAGE_SIZE_4K:
8471                                 tp->nvram_pagesize = 4096;
8472                                 break;
8473                         case FLASH_5752PAGE_SIZE_264:
8474                                 tp->nvram_pagesize = 264;
8475                                 break;
8476                 }
8477         }
8478         else {
8479                 /* For eeprom, set pagesize to maximum eeprom size */
8480                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8481
8482                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8483                 tw32(NVRAM_CFG1, nvcfg1);
8484         }
8485 }
8486
8487 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8488 static void __devinit tg3_nvram_init(struct tg3 *tp)
8489 {
8490         int j;
8491
8492         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8493                 return;
8494
8495         tw32_f(GRC_EEPROM_ADDR,
8496              (EEPROM_ADDR_FSM_RESET |
8497               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8498                EEPROM_ADDR_CLKPERD_SHIFT)));
8499
8500         /* XXX schedule_timeout() ... */
8501         for (j = 0; j < 100; j++)
8502                 udelay(10);
8503
8504         /* Enable seeprom accesses. */
8505         tw32_f(GRC_LOCAL_CTRL,
8506              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8507         udelay(100);
8508
8509         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8510             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8511                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8512
8513                 tg3_enable_nvram_access(tp);
8514
8515                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8516                         tg3_get_5752_nvram_info(tp);
8517                 else
8518                         tg3_get_nvram_info(tp);
8519
8520                 tg3_get_nvram_size(tp);
8521
8522                 tg3_disable_nvram_access(tp);
8523
8524         } else {
8525                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8526
8527                 tg3_get_eeprom_size(tp);
8528         }
8529 }
8530
8531 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8532                                         u32 offset, u32 *val)
8533 {
8534         u32 tmp;
8535         int i;
8536
8537         if (offset > EEPROM_ADDR_ADDR_MASK ||
8538             (offset % 4) != 0)
8539                 return -EINVAL;
8540
8541         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8542                                         EEPROM_ADDR_DEVID_MASK |
8543                                         EEPROM_ADDR_READ);
8544         tw32(GRC_EEPROM_ADDR,
8545              tmp |
8546              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8547              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8548               EEPROM_ADDR_ADDR_MASK) |
8549              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8550
8551         for (i = 0; i < 10000; i++) {
8552                 tmp = tr32(GRC_EEPROM_ADDR);
8553
8554                 if (tmp & EEPROM_ADDR_COMPLETE)
8555                         break;
8556                 udelay(100);
8557         }
8558         if (!(tmp & EEPROM_ADDR_COMPLETE))
8559                 return -EBUSY;
8560
8561         *val = tr32(GRC_EEPROM_DATA);
8562         return 0;
8563 }
8564
8565 #define NVRAM_CMD_TIMEOUT 10000
8566
8567 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8568 {
8569         int i;
8570
8571         tw32(NVRAM_CMD, nvram_cmd);
8572         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8573                 udelay(10);
8574                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8575                         udelay(10);
8576                         break;
8577                 }
8578         }
8579         if (i == NVRAM_CMD_TIMEOUT) {
8580                 return -EBUSY;
8581         }
8582         return 0;
8583 }
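
/* Worst-case wait in the polling loop above: NVRAM_CMD_TIMEOUT (10000)
 * iterations of udelay(10) is roughly 10000 * 10 us = 100 ms before the
 * command is declared stuck and -EBUSY is returned.
 */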
8584
8585 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8586 {
8587         int ret;
8588
8589         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8590                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8591                 return -EINVAL;
8592         }
8593
8594         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8595                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8596
8597         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8598                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8599                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8600
8601                 offset = ((offset / tp->nvram_pagesize) <<
8602                           ATMEL_AT45DB0X1B_PAGE_POS) +
8603                         (offset % tp->nvram_pagesize);
8604         }
8605
8606         if (offset > NVRAM_ADDR_MSK)
8607                 return -EINVAL;
8608
8609         tg3_nvram_lock(tp);
8610
8611         tg3_enable_nvram_access(tp);
8612
8613         tw32(NVRAM_ADDR, offset);
8614         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8615                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8616
8617         if (ret == 0)
8618                 *val = swab32(tr32(NVRAM_RDDATA));
8619
8620         tg3_nvram_unlock(tp);
8621
8622         tg3_disable_nvram_access(tp);
8623
8624         return ret;
8625 }
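
/* Sketch of the address translation applied above for buffered Atmel
 * flash: the part wants the page index at ATMEL_AT45DB0X1B_PAGE_POS with
 * the byte offset within the page in the low bits, so a linear NVRAM
 * offset must be split before being written to NVRAM_ADDR.  The helper
 * name is illustrative only.
 */
static u32 tg3_atmel_linear_to_phys(u32 offset, u32 pagesize, u32 page_pos)
{
        return ((offset / pagesize) << page_pos) + (offset % pagesize);
}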
8626
8627 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8628                                     u32 offset, u32 len, u8 *buf)
8629 {
8630         int i, j, rc = 0;
8631         u32 val;
8632
8633         for (i = 0; i < len; i += 4) {
8634                 u32 addr, data;
8635
8636                 addr = offset + i;
8637
8638                 memcpy(&data, buf + i, 4);
8639
8640                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8641
8642                 val = tr32(GRC_EEPROM_ADDR);
8643                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8644
8645                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8646                         EEPROM_ADDR_READ);
8647                 tw32(GRC_EEPROM_ADDR, val |
8648                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8649                         (addr & EEPROM_ADDR_ADDR_MASK) |
8650                         EEPROM_ADDR_START |
8651                         EEPROM_ADDR_WRITE);
8652                 
8653                 for (j = 0; j < 10000; j++) {
8654                         val = tr32(GRC_EEPROM_ADDR);
8655
8656                         if (val & EEPROM_ADDR_COMPLETE)
8657                                 break;
8658                         udelay(100);
8659                 }
8660                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8661                         rc = -EBUSY;
8662                         break;
8663                 }
8664         }
8665
8666         return rc;
8667 }
8668
8669 /* offset and length are dword aligned */
8670 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8671                 u8 *buf)
8672 {
8673         int ret = 0;
8674         u32 pagesize = tp->nvram_pagesize;
8675         u32 pagemask = pagesize - 1;
8676         u32 nvram_cmd;
8677         u8 *tmp;
8678
8679         tmp = kmalloc(pagesize, GFP_KERNEL);
8680         if (tmp == NULL)
8681                 return -ENOMEM;
8682
8683         while (len) {
8684                 int j;
8685                 u32 phy_addr, page_off, size;
8686
8687                 phy_addr = offset & ~pagemask;
8688         
8689                 for (j = 0; j < pagesize; j += 4) {
8690                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8691                                                 (u32 *) (tmp + j))))
8692                                 break;
8693                 }
8694                 if (ret)
8695                         break;
8696
8697                 page_off = offset & pagemask;
8698                 size = pagesize;
8699                 if (len < size)
8700                         size = len;
8701
8702                 len -= size;
8703
8704                 memcpy(tmp + page_off, buf, size);
8705
8706                 offset = offset + (pagesize - page_off);
8707
8708                 tg3_enable_nvram_access(tp);
8709
8710                 /*
8711                  * Before we can erase the flash page, we need
8712                  * to issue a special "write enable" command.
8713                  */
8714                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8715
8716                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8717                         break;
8718
8719                 /* Erase the target page */
8720                 tw32(NVRAM_ADDR, phy_addr);
8721
8722                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8723                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8724
8725                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8726                         break;
8727
8728                 /* Issue another write enable to start the write. */
8729                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8730
8731                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8732                         break;
8733
8734                 for (j = 0; j < pagesize; j += 4) {
8735                         u32 data;
8736
8737                         data = *((u32 *) (tmp + j));
8738                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8739
8740                         tw32(NVRAM_ADDR, phy_addr + j);
8741
8742                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8743                                 NVRAM_CMD_WR;
8744
8745                         if (j == 0)
8746                                 nvram_cmd |= NVRAM_CMD_FIRST;
8747                         else if (j == (pagesize - 4))
8748                                 nvram_cmd |= NVRAM_CMD_LAST;
8749
8750                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8751                                 break;
8752                 }
8753                 if (ret)
8754                         break;
8755         }
8756
8757         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8758         tg3_nvram_exec_cmd(tp, nvram_cmd);
8759
8760         kfree(tmp);
8761
8762         return ret;
8763 }
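
/* Summary of the unbuffered path above, the classic read-modify-write
 * cycle for flash without an on-chip write buffer: read the full page
 * into a bounce buffer, merge the caller's bytes over the affected range,
 * issue WREN, erase the page, issue WREN again, rewrite the page one
 * 32-bit word at a time (first word tagged NVRAM_CMD_FIRST, last tagged
 * NVRAM_CMD_LAST), and finish with WRDI to drop write enable.
 */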
8764
8765 /* offset and length are dword aligned */
8766 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8767                 u8 *buf)
8768 {
8769         int i, ret = 0;
8770
8771         for (i = 0; i < len; i += 4, offset += 4) {
8772                 u32 data, page_off, phy_addr, nvram_cmd;
8773
8774                 memcpy(&data, buf + i, 4);
8775                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8776
8777                 page_off = offset % tp->nvram_pagesize;
8778
8779                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8780                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8781
8782                         phy_addr = ((offset / tp->nvram_pagesize) <<
8783                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8784                 }
8785                 else {
8786                         phy_addr = offset;
8787                 }
8788
8789                 tw32(NVRAM_ADDR, phy_addr);
8790
8791                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8792
8793                 if ((page_off == 0) || (i == 0))
8794                         nvram_cmd |= NVRAM_CMD_FIRST;
8795                 else if (page_off == (tp->nvram_pagesize - 4))
8796                         nvram_cmd |= NVRAM_CMD_LAST;
8797
8798                 if (i == (len - 4))
8799                         nvram_cmd |= NVRAM_CMD_LAST;
8800
8801                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8802                     (tp->nvram_jedecnum == JEDEC_ST) &&
8803                     (nvram_cmd & NVRAM_CMD_FIRST)) {
8804
8805                         if ((ret = tg3_nvram_exec_cmd(tp,
8806                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8807                                 NVRAM_CMD_DONE)))
8808
8809                                 break;
8810                 }
8811                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8812                         /* We always do complete word writes to eeprom. */
8813                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8814                 }
8815
8816                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8817                         break;
8818         }
8819         return ret;
8820 }
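
/* Sketch of the FIRST/LAST framing used above: each 32-bit word write is
 * tagged NVRAM_CMD_FIRST when it opens a flash page (or the transfer) and
 * NVRAM_CMD_LAST when it closes a page or the transfer, so the controller
 * knows when to begin and when to commit a page program cycle.  The helper
 * name is illustrative only.
 */
static u32 tg3_nvram_word_cmd(u32 i, u32 len, u32 page_off, u32 pagesize)
{
        u32 cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

        if (page_off == 0 || i == 0)
                cmd |= NVRAM_CMD_FIRST;
        else if (page_off == pagesize - 4)
                cmd |= NVRAM_CMD_LAST;

        if (i == len - 4)
                cmd |= NVRAM_CMD_LAST;

        return cmd;
}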
8821
8822 /* offset and length are dword aligned */
8823 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8824 {
8825         int ret;
8826
8827         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8828                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8829                 return -EINVAL;
8830         }
8831
8832         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8833                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8834                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8835                 udelay(40);
8836         }
8837
8838         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8839                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8840         }
8841         else {
8842                 u32 grc_mode;
8843
8844                 tg3_nvram_lock(tp);
8845
8846                 tg3_enable_nvram_access(tp);
8847                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8848                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8849                         tw32(NVRAM_WRITE1, 0x406);
8850
8851                 grc_mode = tr32(GRC_MODE);
8852                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8853
8854                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8855                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8856
8857                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8858                                 buf);
8859                 }
8860                 else {
8861                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8862                                 buf);
8863                 }
8864
8865                 grc_mode = tr32(GRC_MODE);
8866                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8867
8868                 tg3_disable_nvram_access(tp);
8869                 tg3_nvram_unlock(tp);
8870         }
8871
8872         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8873                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8874                 udelay(40);
8875         }
8876
8877         return ret;
8878 }
8879
8880 struct subsys_tbl_ent {
8881         u16 subsys_vendor, subsys_devid;
8882         u32 phy_id;
8883 };
8884
8885 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8886         /* Broadcom boards. */
8887         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8888         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8889         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8890         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8891         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8892         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8893         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8894         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8895         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8896         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8897         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8898
8899         /* 3com boards. */
8900         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8901         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8902         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8903         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8904         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8905
8906         /* DELL boards. */
8907         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8908         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8909         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8910         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8911
8912         /* Compaq boards. */
8913         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8914         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8915         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8916         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8917         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8918
8919         /* IBM boards. */
8920         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8921 };
8922
8923 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8924 {
8925         int i;
8926
8927         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8928                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8929                      tp->pdev->subsystem_vendor) &&
8930                     (subsys_id_to_phy_id[i].subsys_devid ==
8931                      tp->pdev->subsystem_device))
8932                         return &subsys_id_to_phy_id[i];
8933         }
8934         return NULL;
8935 }
8936
8937 /* Since this function may be called in D3-hot power state during
8938  * tg3_init_one(), only config cycles are allowed.
8939  */
8940 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8941 {
8942         u32 val;
8943
8944         /* Make sure register accesses (indirect or otherwise)
8945          * will function correctly.
8946          */
8947         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8948                                tp->misc_host_ctrl);
8949
8950         tp->phy_id = PHY_ID_INVALID;
8951         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8952
8953         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8954         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8955                 u32 nic_cfg, led_cfg;
8956                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8957                 int eeprom_phy_serdes = 0;
8958
8959                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8960                 tp->nic_sram_data_cfg = nic_cfg;
8961
8962                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8963                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8964                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8965                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8966                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8967                     (ver > 0) && (ver < 0x100))
8968                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8969
8970                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8971                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8972                         eeprom_phy_serdes = 1;
8973
8974                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8975                 if (nic_phy_id != 0) {
8976                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8977                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8978
8979                         eeprom_phy_id  = (id1 >> 16) << 10;
8980                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
8981                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
8982                 } else
8983                         eeprom_phy_id = 0;
8984
8985                 tp->phy_id = eeprom_phy_id;
8986                 if (eeprom_phy_serdes) {
8987                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
8988                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
8989                         else
8990                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8991                 }
8992
8993                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8994                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8995                                     SHASTA_EXT_LED_MODE_MASK);
8996                 else
8997                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8998
8999                 switch (led_cfg) {
9000                 default:
9001                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9002                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9003                         break;
9004
9005                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9006                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9007                         break;
9008
9009                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9010                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9011
9012                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9013                          * read on some older 5700/5701 bootcode.
9014                          */
9015                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9016                             ASIC_REV_5700 ||
9017                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9018                             ASIC_REV_5701)
9019                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9020
9021                         break;
9022
9023                 case SHASTA_EXT_LED_SHARED:
9024                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9025                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9026                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9027                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9028                                                  LED_CTRL_MODE_PHY_2);
9029                         break;
9030
9031                 case SHASTA_EXT_LED_MAC:
9032                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9033                         break;
9034
9035                 case SHASTA_EXT_LED_COMBO:
9036                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9037                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9038                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9039                                                  LED_CTRL_MODE_PHY_2);
9040                         break;
9041
9042                 }
9043
9044                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9045                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9046                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9047                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9048
9049                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9050                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9051                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9052                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9053
9054                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9055                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9056                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9057                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9058                 }
9059                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9060                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9061
9062                 if (cfg2 & (1 << 17))
9063                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9064
9065                 /* serdes signal pre-emphasis in register 0x590 set by
9066                  * bootcode if bit 18 is set */
9067                 if (cfg2 & (1 << 18))
9068                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9069         }
9070 }
9071
9072 static int __devinit tg3_phy_probe(struct tg3 *tp)
9073 {
9074         u32 hw_phy_id_1, hw_phy_id_2;
9075         u32 hw_phy_id, hw_phy_id_masked;
9076         int err;
9077
9078         /* Reading the PHY ID register can conflict with ASF
9079          * firmware access to the PHY hardware.
9080          */
9081         err = 0;
9082         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9083                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9084         } else {
9085                 /* Now read the physical PHY_ID from the chip and verify
9086                  * that it is sane.  If it doesn't look good, we fall back
9087                  * to the value found in the eeprom area and, failing
9088                  * that, the hard-coded subsystem ID table.
9089                  */
9090                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9091                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9092
9093                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9094                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9095                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9096
9097                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9098         }
9099
9100         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9101                 tp->phy_id = hw_phy_id;
9102                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9103                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9104                 else
9105                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9106         } else {
9107                 if (tp->phy_id != PHY_ID_INVALID) {
9108                         /* Do nothing, phy ID already set up in
9109                          * tg3_get_eeprom_hw_cfg().
9110                          */
9111                 } else {
9112                         struct subsys_tbl_ent *p;
9113
9114                         /* No eeprom signature?  Try the hardcoded
9115                          * subsys device table.
9116                          */
9117                         p = lookup_by_subsys(tp);
9118                         if (!p)
9119                                 return -ENODEV;
9120
9121                         tp->phy_id = p->phy_id;
9122                         if (!tp->phy_id ||
9123                             tp->phy_id == PHY_ID_BCM8002)
9124                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9125                 }
9126         }
9127
9128         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9129             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9130                 u32 bmsr, adv_reg, tg3_ctrl;
9131
9132                 tg3_readphy(tp, MII_BMSR, &bmsr);
9133                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9134                     (bmsr & BMSR_LSTATUS))
9135                         goto skip_phy_reset;
9136                     
9137                 err = tg3_phy_reset(tp);
9138                 if (err)
9139                         return err;
9140
9141                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9142                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9143                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9144                 tg3_ctrl = 0;
9145                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9146                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9147                                     MII_TG3_CTRL_ADV_1000_FULL);
9148                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9149                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9150                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9151                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9152                 }
9153
9154                 if (!tg3_copper_is_advertising_all(tp)) {
9155                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9156
9157                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9158                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9159
9160                         tg3_writephy(tp, MII_BMCR,
9161                                      BMCR_ANENABLE | BMCR_ANRESTART);
9162                 }
9163                 tg3_phy_set_wirespeed(tp);
9164
9165                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9166                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9167                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9168         }
9169
9170 skip_phy_reset:
9171         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9172                 err = tg3_init_5401phy_dsp(tp);
9173                 if (err)
9174                         return err;
9175         }
9176
9177         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9178                 err = tg3_init_5401phy_dsp(tp);
9179         }
9180
9181         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9182                 tp->link_config.advertising =
9183                         (ADVERTISED_1000baseT_Half |
9184                          ADVERTISED_1000baseT_Full |
9185                          ADVERTISED_Autoneg |
9186                          ADVERTISED_FIBRE);
9187         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9188                 tp->link_config.advertising &=
9189                         ~(ADVERTISED_1000baseT_Half |
9190                           ADVERTISED_1000baseT_Full);
9191
9192         return err;
9193 }
9194
9195 static void __devinit tg3_read_partno(struct tg3 *tp)
9196 {
9197         unsigned char vpd_data[256];
9198         int i;
9199
9200         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9201                 /* Sun decided not to put the necessary bits in the
9202                  * NVRAM of their onboard tg3 parts :(
9203                  */
9204                 strcpy(tp->board_part_number, "Sun 570X");
9205                 return;
9206         }
9207
9208         for (i = 0; i < 256; i += 4) {
9209                 u32 tmp;
9210
9211                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9212                         goto out_not_found;
9213
9214                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9215                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9216                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9217                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9218         }
9219
9220         /* Now parse and find the part number. */
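        /* The data read above follows the PCI 2.2 VPD format: large
         * resource tags 0x82 (identifier string) and 0x91 (VPD-W) are
         * skipped, 0x90 marks the read-only VPD-R area, and each VPD-R
         * field is a 2-byte keyword, a 1-byte length, then the data.
         * The board part number is stored under the "PN" keyword.
         */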
9221         for (i = 0; i < 256; ) {
9222                 unsigned char val = vpd_data[i];
9223                 int block_end;
9224
9225                 if (val == 0x82 || val == 0x91) {
9226                         i = (i + 3 +
9227                              (vpd_data[i + 1] +
9228                               (vpd_data[i + 2] << 8)));
9229                         continue;
9230                 }
9231
9232                 if (val != 0x90)
9233                         goto out_not_found;
9234
9235                 block_end = (i + 3 +
9236                              (vpd_data[i + 1] +
9237                               (vpd_data[i + 2] << 8)));
9238                 i += 3;
9239                 while (i < block_end) {
9240                         if (vpd_data[i + 0] == 'P' &&
9241                             vpd_data[i + 1] == 'N') {
9242                                 int partno_len = vpd_data[i + 2];
9243
9244                                 if (partno_len > 24)
9245                                         goto out_not_found;
9246
9247                                 memcpy(tp->board_part_number,
9248                                        &vpd_data[i + 3],
9249                                        partno_len);
9250
9251                                 /* Success. */
9252                                 return;
9253                         }
                             /* No "PN" match: step to the next VPD-R
                              * field (2-byte keyword + 1-byte length +
                              * data); without this the loop never
                              * advances.
                              */
                             i += 3 + vpd_data[i + 2];
9254                 }
9255
9256                 /* Part number not found. */
9257                 goto out_not_found;
9258         }
9259
9260 out_not_found:
9261         strcpy(tp->board_part_number, "none");
9262 }
9263
9264 #ifdef CONFIG_SPARC64
9265 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9266 {
9267         struct pci_dev *pdev = tp->pdev;
9268         struct pcidev_cookie *pcp = pdev->sysdata;
9269
9270         if (pcp != NULL) {
9271                 int node = pcp->prom_node;
9272                 u32 venid;
9273                 int err;
9274
9275                 err = prom_getproperty(node, "subsystem-vendor-id",
9276                                        (char *) &venid, sizeof(venid));
9277                 if (err == 0 || err == -1)
9278                         return 0;
9279                 if (venid == PCI_VENDOR_ID_SUN)
9280                         return 1;
9281         }
9282         return 0;
9283 }
9284 #endif
9285
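/* Collect everything that is fixed for the lifetime of the device:
 * chip revision and bus type, chipset/board specific workarounds, the
 * register access methods to use, and the initial configuration held
 * in NVRAM/eeprom.
 */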
9286 static int __devinit tg3_get_invariants(struct tg3 *tp)
9287 {
9288         static struct pci_device_id write_reorder_chipsets[] = {
9289                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9290                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9291                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9292                              PCI_DEVICE_ID_VIA_8385_0) },
9293                 { },
9294         };
9295         u32 misc_ctrl_reg;
9296         u32 cacheline_sz_reg;
9297         u32 pci_state_reg, grc_misc_cfg;
9298         u32 val;
9299         u16 pci_cmd;
9300         int err;
9301
9302 #ifdef CONFIG_SPARC64
9303         if (tg3_is_sun_570X(tp))
9304                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9305 #endif
9306
9307         /* Force memory write invalidate off.  If we leave it on,
9308          * then on 5700_BX chips we have to enable a workaround.
9309          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9310          * to match the cacheline size.  The Broadcom driver has this
9311          * workaround but turns MWI off all the time, so it never uses
9312          * it.  This seems to suggest that the workaround is insufficient.
9313          */
9314         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9315         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9316         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9317
9318         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9319          * has the register indirect write enable bit set before
9320          * we try to access any of the MMIO registers.  It is also
9321          * critical that the PCI-X hw workaround situation is decided
9322          * before that as well.
9323          */
9324         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9325                               &misc_ctrl_reg);
9326
9327         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9328                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9329
9330         /* Wrong chip ID in 5752 A0. This code can be removed later
9331          * as A0 is not in production.
9332          */
9333         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9334                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9335
9336         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9337          * we need to disable memory and use config. cycles
9338          * only to access all registers. The 5702/03 chips
9339          * can mistakenly decode the special cycles from the
9340          * ICH chipsets as memory write cycles, causing corruption
9341          * of register and memory space. Only certain ICH bridges
9342          * will drive special cycles with non-zero data during the
9343          * address phase which can fall within the 5703's address
9344          * range. This is not an ICH bug as the PCI spec allows
9345          * non-zero address during special cycles. However, only
9346          * these ICH bridges are known to drive non-zero addresses
9347          * during special cycles.
9348          *
9349          * Since special cycles do not cross PCI bridges, we only
9350          * enable this workaround if the 5703 is on the secondary
9351          * bus of these ICH bridges.
9352          */
9353         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9354             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9355                 static struct tg3_dev_id {
9356                         u32     vendor;
9357                         u32     device;
9358                         u32     rev;
9359                 } ich_chipsets[] = {
9360                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9361                           PCI_ANY_ID },
9362                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9363                           PCI_ANY_ID },
9364                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9365                           0xa },
9366                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9367                           PCI_ANY_ID },
9368                         { },
9369                 };
9370                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9371                 struct pci_dev *bridge = NULL;
9372
9373                 while (pci_id->vendor != 0) {
9374                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9375                                                 bridge);
9376                         if (!bridge) {
9377                                 pci_id++;
9378                                 continue;
9379                         }
9380                         if (pci_id->rev != PCI_ANY_ID) {
9381                                 u8 rev;
9382
9383                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9384                                                      &rev);
9385                                 if (rev > pci_id->rev)
9386                                         continue;
9387                         }
9388                         if (bridge->subordinate &&
9389                             (bridge->subordinate->number ==
9390                              tp->pdev->bus->number)) {
9391
9392                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9393                                 pci_dev_put(bridge);
9394                                 break;
9395                         }
9396                 }
9397         }
9398
9399         /* Find msi capability. */
9400         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9401             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9402                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9403                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9404         }
9405
9406         /* Initialize misc host control in PCI block. */
9407         tp->misc_host_ctrl |= (misc_ctrl_reg &
9408                                MISC_HOST_CTRL_CHIPREV);
9409         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9410                                tp->misc_host_ctrl);
9411
9412         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9413                               &cacheline_sz_reg);
9414
9415         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9416         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9417         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9418         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9419
9420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9421             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9422             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9423                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9424
9425         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9426             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9427                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9428
9429         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9430                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9431
9432         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9433             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9434             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9435                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9436
9437         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9438                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9439
9440         /* If we have an AMD 762 or VIA K8T800 chipset, write
9441          * reordering to the mailbox registers done by the host
9442          * controller can cause major troubles.  We read back from
9443          * every mailbox register write to force the writes to be
9444          * posted to the chip in order.
9445          */
9446         if (pci_dev_present(write_reorder_chipsets) &&
9447             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9448                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9449
9450         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9451             tp->pci_lat_timer < 64) {
9452                 tp->pci_lat_timer = 64;
9453
9454                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9455                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9456                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9457                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9458
9459                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9460                                        cacheline_sz_reg);
9461         }
9462
9463         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9464                               &pci_state_reg);
9465
9466         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9467                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9468
9469                 /* If this is a 5700 BX chipset, and we are in PCI-X
9470                  * mode, enable register write workaround.
9471                  *
9472                  * The workaround is to use indirect register accesses
9473                  * for all chip writes not to mailbox registers.
9474                  */
9475                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9476                         u32 pm_reg;
9477                         u16 pci_cmd;
9478
9479                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9480
9481                         /* The chip can have its power management PCI config
9482                          * space registers clobbered due to this bug.
9483                          * So explicitly force the chip into D0 here.
9484                          */
9485                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9486                                               &pm_reg);
9487                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9488                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9489                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9490                                                pm_reg);
9491
9492                         /* Also, force SERR#/PERR# in PCI command. */
9493                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9494                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9495                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9496                 }
9497         }
9498
9499         /* 5700 BX chips need to have their TX producer index mailboxes
9500          * written twice to workaround a bug.
9501          */
9502         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9503                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9504
9505         /* Back to back register writes can cause problems on this chip,
9506          * the workaround is to read back all reg writes except those to
9507          * mailbox regs.  See tg3_write_indirect_reg32().
9508          *
9509          * PCI Express 5750_A0 rev chips need this workaround too.
9510          */
9511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9512             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9513              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9514                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9515
9516         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9517                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9518         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9519                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9520
9521         /* Chip-specific fixup from Broadcom driver */
9522         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9523             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9524                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9525                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9526         }
9527
9528         /* Default fast path register access methods */
9529         tp->read32 = tg3_read32;
9530         tp->write32 = tg3_write32;
9531         tp->read32_mbox = tg3_read32;
9532         tp->write32_mbox = tg3_write32;
9533         tp->write32_tx_mbox = tg3_write32;
9534         tp->write32_rx_mbox = tg3_write32;
9535
9536         /* Various workaround register access methods */
9537         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9538                 tp->write32 = tg3_write_indirect_reg32;
9539         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9540                 tp->write32 = tg3_write_flush_reg32;
9541
9542         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9543             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9544                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9545                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9546                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9547         }
9548
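        /* With the ICH workaround active, MMIO is abandoned entirely:
         * the BAR mapping is torn down, memory decoding is disabled in
         * PCI_COMMAND, and every register and mailbox access goes
         * through the config-space indirect access registers.
         */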
9549         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9550                 tp->read32 = tg3_read_indirect_reg32;
9551                 tp->write32 = tg3_write_indirect_reg32;
9552                 tp->read32_mbox = tg3_read_indirect_mbox;
9553                 tp->write32_mbox = tg3_write_indirect_mbox;
9554                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9555                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9556
9557                 iounmap(tp->regs);
9558                 tp->regs = NULL;
9559
9560                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9561                 pci_cmd &= ~PCI_COMMAND_MEMORY;
9562                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9563         }
9564
9565         /* Get eeprom hw config before calling tg3_set_power_state().
9566          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9567          * determined before calling tg3_set_power_state() so that
9568          * we know whether or not to switch out of Vaux power.
9569          * When the flag is set, it means that GPIO1 is used for eeprom
9570          * write protect and also implies that it is a LOM where GPIOs
9571          * are not used to switch power.
9572          */ 
9573         tg3_get_eeprom_hw_cfg(tp);
9574
9575         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9576          * GPIO1 driven high will bring 5700's external PHY out of reset.
9577          * It is also used as eeprom write protect on LOMs.
9578          */
9579         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9580         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9581             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9582                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9583                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9584         /* Unused GPIO3 must be driven as output on 5752 because there
9585          * are no pull-up resistors on unused GPIO pins.
9586          */
9587         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9588                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9589
9590         /* Force the chip into D0. */
9591         err = tg3_set_power_state(tp, 0);
9592         if (err) {
9593                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9594                        pci_name(tp->pdev));
9595                 return err;
9596         }
9597
9598         /* 5700 B0 chips do not support checksumming correctly due
9599          * to hardware bugs.
9600          */
9601         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9602                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9603
9604         /* Pseudo-header checksum is done by hardware logic and not
9605          * the offload processors, so make the chip do the pseudo-
9606          * header checksums on receive.  For transmit it is more
9607          * convenient to do the pseudo-header checksum in software
9608          * as Linux does that on transmit for us in all cases.
9609          */
9610         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9611         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9612
9613         /* Derive initial jumbo mode from MTU assigned in
9614          * ether_setup() via the alloc_etherdev() call
9615          */
9616         if (tp->dev->mtu > ETH_DATA_LEN &&
9617             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9618                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9619
9620         /* Determine WakeOnLan speed to use. */
9621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9622             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9623             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9624             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9625                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9626         } else {
9627                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9628         }
9629
9630         /* A few boards don't want the Ethernet@WireSpeed phy feature */
9631         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9632             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9633              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9634              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9635             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9636                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9637
9638         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9639             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9640                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9641         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9642                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9643
9644         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9645                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9646
9647         tp->coalesce_mode = 0;
9648         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9649             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9650                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9651
9652         /* Initialize MAC MI mode, polling disabled. */
9653         tw32_f(MAC_MI_MODE, tp->mi_mode);
9654         udelay(80);
9655
9656         /* Initialize data/descriptor byte/word swapping. */
9657         val = tr32(GRC_MODE);
9658         val &= GRC_MODE_HOST_STACKUP;
9659         tw32(GRC_MODE, val | tp->grc_mode);
9660
9661         tg3_switch_clocks(tp);
9662
9663         /* Clear this out for sanity. */
9664         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9665
9666         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9667                               &pci_state_reg);
9668         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9669             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9670                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9671
9672                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9673                     chiprevid == CHIPREV_ID_5701_B0 ||
9674                     chiprevid == CHIPREV_ID_5701_B2 ||
9675                     chiprevid == CHIPREV_ID_5701_B5) {
9676                         void __iomem *sram_base;
9677
9678                         /* Write some dummy words into the SRAM status block
9679                          * area, see if it reads back correctly.  If the return
9680                          * value is bad, force enable the PCIX workaround.
9681                          */
9682                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9683
9684                         writel(0x00000000, sram_base);
9685                         writel(0x00000000, sram_base + 4);
9686                         writel(0xffffffff, sram_base + 4);
9687                         if (readl(sram_base) != 0x00000000)
9688                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9689                 }
9690         }
9691
9692         udelay(50);
9693         tg3_nvram_init(tp);
9694
9695         grc_misc_cfg = tr32(GRC_MISC_CFG);
9696         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9697
9698         /* Broadcom's driver says that CIOBE multisplit has a bug */
9699 #if 0
9700         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9701             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9702                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9703                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9704         }
9705 #endif
9706         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9707             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9708              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9709                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9710
9711         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9712             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9713                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9714         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9715                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9716                                       HOSTCC_MODE_CLRTICK_TXBD);
9717
9718                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9719                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9720                                        tp->misc_host_ctrl);
9721         }
9722
9723         /* these are limited to 10/100 only */
9724         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9725              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9726             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9727              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9728              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9729               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9730               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9731             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9732              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9733               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9734                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9735
9736         err = tg3_phy_probe(tp);
9737         if (err) {
9738                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9739                        pci_name(tp->pdev), err);
9740                 /* ... but do not return immediately ... */
9741         }
9742
9743         tg3_read_partno(tp);
9744
9745         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9746                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9747         } else {
9748                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9749                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9750                 else
9751                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9752         }
9753
9754         /* 5700 {AX,BX} chips have a broken status block link
9755          * change bit implementation, so we must use the
9756          * status register in those cases.
9757          */
9758         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9759                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9760         else
9761                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9762
9763         /* The led_ctrl is set during tg3_phy_probe; here we might
9764          * have to force the link status polling mechanism based
9765          * upon subsystem IDs.
9766          */
9767         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9768             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9769                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9770                                   TG3_FLAG_USE_LINKCHG_REG);
9771         }
9772
9773         /* For all SERDES we poll the MAC status register. */
9774         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9775                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9776         else
9777                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9778
9779         /* It seems all chips can get confused if TX buffers
9780          * straddle the 4GB address boundary in some cases.
9781          */
9782         tp->dev->hard_start_xmit = tg3_start_xmit;
9783
9784         tp->rx_offset = 2;
9785         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9786             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9787                 tp->rx_offset = 0;
9788
9789         /* By default, disable wake-on-lan.  User can change this
9790          * using ETHTOOL_SWOL.
9791          */
9792         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9793
9794         return err;
9795 }
9796
9797 #ifdef CONFIG_SPARC64
9798 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9799 {
9800         struct net_device *dev = tp->dev;
9801         struct pci_dev *pdev = tp->pdev;
9802         struct pcidev_cookie *pcp = pdev->sysdata;
9803
9804         if (pcp != NULL) {
9805                 int node = pcp->prom_node;
9806
9807                 if (prom_getproplen(node, "local-mac-address") == 6) {
9808                         prom_getproperty(node, "local-mac-address",
9809                                          dev->dev_addr, 6);
9810                         memcpy(dev->perm_addr, dev->dev_addr, 6);
9811                         return 0;
9812                 }
9813         }
9814         return -ENODEV;
9815 }
9816
9817 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9818 {
9819         struct net_device *dev = tp->dev;
9820
9821         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9822         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
9823         return 0;
9824 }
9825 #endif
9826
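/* Obtain the station MAC address.  Sources are tried in order: the
 * OpenBoot PROM property on SPARC, the NIC SRAM MAC address mailbox
 * (accepted when its high half carries the 0x484b, ASCII "HK",
 * signature), the NVRAM words at mac_offset, and finally whatever is
 * already programmed into the MAC address registers.
 */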
9827 static int __devinit tg3_get_device_address(struct tg3 *tp)
9828 {
9829         struct net_device *dev = tp->dev;
9830         u32 hi, lo, mac_offset;
9831
9832 #ifdef CONFIG_SPARC64
9833         if (!tg3_get_macaddr_sparc(tp))
9834                 return 0;
9835 #endif
9836
9837         mac_offset = 0x7c;
9838         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9839              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9840             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9841                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9842                         mac_offset = 0xcc;
9843                 if (tg3_nvram_lock(tp))
9844                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9845                 else
9846                         tg3_nvram_unlock(tp);
9847         }
9848
9849         /* First try to get it from MAC address mailbox. */
9850         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9851         if ((hi >> 16) == 0x484b) {
9852                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9853                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9854
9855                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9856                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9857                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9858                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9859                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9860         }
9861         /* Next, try NVRAM. */
9862         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9863                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9864                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9865                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9866                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9867                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9868                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9869                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9870                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9871         }
9872         /* Finally just fetch it out of the MAC control regs. */
9873         else {
9874                 hi = tr32(MAC_ADDR_0_HIGH);
9875                 lo = tr32(MAC_ADDR_0_LOW);
9876
9877                 dev->dev_addr[5] = lo & 0xff;
9878                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9879                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9880                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9881                 dev->dev_addr[1] = hi & 0xff;
9882                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9883         }
9884
9885         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9886 #ifdef CONFIG_SPARC64
9887                 if (!tg3_get_default_macaddr_sparc(tp))
9888                         return 0;
9889 #endif
9890                 return -EINVAL;
9891         }
9892         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
9893         return 0;
9894 }
9895
9896 #define BOUNDARY_SINGLE_CACHELINE       1
9897 #define BOUNDARY_MULTI_CACHELINE        2
9898
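/* Derive the DMA read/write boundary bits for TG3PCI_DMA_RW_CTRL from
 * the PCI cache line size and the bus type (PCI, PCI-X or PCI Express).
 * The goal is to keep DMA bursts from straddling a cache line on hosts
 * where that causes a disconnect; chips other than 5700/5701 that are
 * not PCI Express ignore these bits, so val is returned unchanged there.
 */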
9899 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9900 {
9901         int cacheline_size;
9902         u8 byte;
9903         int goal;
9904
9905         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9906         if (byte == 0)
9907                 cacheline_size = 1024;
9908         else
9909                 cacheline_size = (int) byte * 4;
9910
9911         /* On 5703 and later chips, the boundary bits have no
9912          * effect.
9913          */
9914         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9915             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9916             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9917                 goto out;
9918
9919 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9920         goal = BOUNDARY_MULTI_CACHELINE;
9921 #else
9922 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9923         goal = BOUNDARY_SINGLE_CACHELINE;
9924 #else
9925         goal = 0;
9926 #endif
9927 #endif
9928
9929         if (!goal)
9930                 goto out;
9931
9932         /* PCI controllers on most RISC systems tend to disconnect
9933          * when a device tries to burst across a cache-line boundary.
9934          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9935          *
9936          * Unfortunately, for PCI-E there are only limited
9937          * write-side controls for this, and thus for reads
9938          * we will still get the disconnects.  We'll also waste
9939          * these PCI cycles for both read and write for chips
9940          * other than 5700 and 5701 which do not implement the
9941          * boundary bits.
9942          */
9943         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9944             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9945                 switch (cacheline_size) {
9946                 case 16:
9947                 case 32:
9948                 case 64:
9949                 case 128:
9950                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9951                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9952                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9953                         } else {
9954                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9955                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9956                         }
9957                         break;
9958
9959                 case 256:
9960                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9961                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9962                         break;
9963
9964                 default:
9965                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9966                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9967                         break;
9968                 };
9969         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9970                 switch (cacheline_size) {
9971                 case 16:
9972                 case 32:
9973                 case 64:
9974                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9975                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9976                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9977                                 break;
9978                         }
9979                         /* fallthrough */
9980                 case 128:
9981                 default:
9982                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9983                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9984                         break;
9985                 };
9986         } else {
9987                 switch (cacheline_size) {
9988                 case 16:
9989                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9990                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9991                                         DMA_RWCTRL_WRITE_BNDRY_16);
9992                                 break;
9993                         }
9994                         /* fallthrough */
9995                 case 32:
9996                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9997                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9998                                         DMA_RWCTRL_WRITE_BNDRY_32);
9999                                 break;
10000                         }
10001                         /* fallthrough */
10002                 case 64:
10003                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10004                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10005                                         DMA_RWCTRL_WRITE_BNDRY_64);
10006                                 break;
10007                         }
10008                         /* fallthrough */
10009                 case 128:
10010                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10011                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10012                                         DMA_RWCTRL_WRITE_BNDRY_128);
10013                                 break;
10014                         }
10015                         /* fallthrough */
10016                 case 256:
10017                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10018                                 DMA_RWCTRL_WRITE_BNDRY_256);
10019                         break;
10020                 case 512:
10021                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10022                                 DMA_RWCTRL_WRITE_BNDRY_512);
10023                         break;
10024                 case 1024:
10025                 default:
10026                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10027                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10028                         break;
10029                 };
10030         }
10031
10032 out:
10033         return val;
10034 }
10035
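/* Run a single DMA transfer between the host buffer at buf_dma and NIC
 * internal memory: an internal buffer descriptor is written into NIC
 * SRAM through the PCI memory window registers, the read (to_device)
 * or write DMA engine is kicked via its FTQ, and the completion FIFO
 * is polled for up to ~4ms.  Returns 0 on completion, -ENODEV on
 * timeout.
 */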
10036 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10037 {
10038         struct tg3_internal_buffer_desc test_desc;
10039         u32 sram_dma_descs;
10040         int i, ret;
10041
10042         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10043
10044         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10045         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10046         tw32(RDMAC_STATUS, 0);
10047         tw32(WDMAC_STATUS, 0);
10048
10049         tw32(BUFMGR_MODE, 0);
10050         tw32(FTQ_RESET, 0);
10051
10052         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10053         test_desc.addr_lo = buf_dma & 0xffffffff;
10054         test_desc.nic_mbuf = 0x00002100;
10055         test_desc.len = size;
10056
10057         /*
10058          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10059          * the *second* time the tg3 driver was getting loaded after an
10060          * initial scan.
10061          *
10062          * Broadcom tells me:
10063          *   ...the DMA engine is connected to the GRC block and a DMA
10064          *   reset may affect the GRC block in some unpredictable way...
10065          *   The behavior of resets to individual blocks has not been tested.
10066          *
10067          * Broadcom noted the GRC reset will also reset all sub-components.
10068          */
10069         if (to_device) {
10070                 test_desc.cqid_sqid = (13 << 8) | 2;
10071
10072                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10073                 udelay(40);
10074         } else {
10075                 test_desc.cqid_sqid = (16 << 8) | 7;
10076
10077                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10078                 udelay(40);
10079         }
10080         test_desc.flags = 0x00000005;
10081
10082         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10083                 u32 val;
10084
10085                 val = *(((u32 *)&test_desc) + i);
10086                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10087                                        sram_dma_descs + (i * sizeof(u32)));
10088                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10089         }
10090         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10091
10092         if (to_device) {
10093                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10094         } else {
10095                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10096         }
10097
10098         ret = -ENODEV;
10099         for (i = 0; i < 40; i++) {
10100                 u32 val;
10101
10102                 if (to_device)
10103                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10104                 else
10105                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10106                 if ((val & 0xffff) == sram_dma_descs) {
10107                         ret = 0;
10108                         break;
10109                 }
10110
10111                 udelay(100);
10112         }
10113
10114         return ret;
10115 }
10116
10117 #define TEST_BUFFER_SIZE        0x2000
10118
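/* Choose the DMA read/write control value for this chip and bus, then,
 * on 5700/5701 only, exercise it: copy an 8KB test pattern to the chip
 * and back using the maximum write burst size, falling back to a
 * 16-byte write boundary if the data comes back corrupted.
 */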
10119 static int __devinit tg3_test_dma(struct tg3 *tp)
10120 {
10121         dma_addr_t buf_dma;
10122         u32 *buf, saved_dma_rwctrl;
10123         int ret;
10124
10125         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10126         if (!buf) {
10127                 ret = -ENOMEM;
10128                 goto out_nofree;
10129         }
10130
10131         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10132                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10133
10134         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10135
10136         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10137                 /* DMA read watermark not used on PCIE */
10138                 tp->dma_rwctrl |= 0x00180000;
10139         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10140                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10141                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10142                         tp->dma_rwctrl |= 0x003f0000;
10143                 else
10144                         tp->dma_rwctrl |= 0x003f000f;
10145         } else {
10146                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10147                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10148                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10149
10150                         if (ccval == 0x6 || ccval == 0x7)
10151                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10152
10153                         /* Set bit 23 to enable PCIX hw bug fix */
10154                         tp->dma_rwctrl |= 0x009f0000;
10155                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10156                         /* 5780 always in PCIX mode */
10157                         tp->dma_rwctrl |= 0x00144000;
10158                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10159                         /* 5714 always in PCIX mode */
10160                         tp->dma_rwctrl |= 0x00148000;
10161                 } else {
10162                         tp->dma_rwctrl |= 0x001b000f;
10163                 }
10164         }
10165
10166         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10167             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10168                 tp->dma_rwctrl &= 0xfffffff0;
10169
10170         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10171             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10172                 /* Remove this if it causes problems for some boards. */
10173                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10174
10175                 /* On 5700/5701 chips, we need to set this bit.
10176                  * Otherwise the chip will issue cacheline transactions
10177                  * to streamable DMA memory with not all the byte
10178                  * to streamable DMA memory without all of the byte
10179                  * RISC PCI controllers, in particular sparc64.
10180                  *
10181                  * On 5703/5704 chips, this bit has been reassigned
10182                  * a different meaning.  In particular, it is used
10183                  * on those chips to enable a PCI-X workaround.
10184                  */
10185                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10186         }
10187
10188         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10189
10190 #if 0
10191         /* Unneeded, already done by tg3_get_invariants.  */
10192         tg3_switch_clocks(tp);
10193 #endif
10194
10195         ret = 0;
10196         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10197             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10198                 goto out;
10199
10200         /* It is best to perform DMA test with maximum write burst size
10201          * to expose the 5700/5701 write DMA bug.
10202          */
10203         saved_dma_rwctrl = tp->dma_rwctrl;
10204         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10205         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10206
10207         while (1) {
10208                 u32 *p = buf, i;
10209
10210                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10211                         p[i] = i;
10212
10213                 /* Send the buffer to the chip. */
10214                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10215                 if (ret) {
10216                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10217                         break;
10218                 }
10219
10220 #if 0
10221                 /* validate data reached card RAM correctly. */
10222                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10223                         u32 val;
10224                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10225                         if (le32_to_cpu(val) != p[i]) {
10226                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10227                                 /* ret = -ENODEV here? */
10228                         }
10229                         p[i] = 0;
10230                 }
10231 #endif
10232                 /* Now read it back. */
10233                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10234                 if (ret) {
10235                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10236
10237                         break;
10238                 }
10239
10240                 /* Verify it. */
10241                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10242                         if (p[i] == i)
10243                                 continue;
10244
10245                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10246                             DMA_RWCTRL_WRITE_BNDRY_16) {
10247                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10248                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10249                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10250                                 break;
10251                         } else {
10252                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10253                                 ret = -ENODEV;
10254                                 goto out;
10255                         }
10256                 }
10257
10258                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10259                         /* Success. */
10260                         ret = 0;
10261                         break;
10262                 }
10263         }
10264         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10265             DMA_RWCTRL_WRITE_BNDRY_16) {
10266                 static struct pci_device_id dma_wait_state_chipsets[] = {
10267                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10268                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10269                         { },
10270                 };
10271
10272                 /* DMA test passed without adjusting DMA boundary;
10273                  * now look for chipsets that are known to expose the
10274                  * DMA bug without failing the test.
10275                  */
10276                 if (pci_dev_present(dma_wait_state_chipsets)) {
10277                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10278                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10279                 }
10280                 else
10281                         /* Safe to use the calculated DMA boundary. */
10282                         tp->dma_rwctrl = saved_dma_rwctrl;
10283
10284                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10285         }
10286
10287 out:
10288         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10289 out_nofree:
10290         return ret;
10291 }
10292
10293 static void __devinit tg3_init_link_config(struct tg3 *tp)
10294 {
10295         tp->link_config.advertising =
10296                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10297                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10298                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10299                  ADVERTISED_Autoneg | ADVERTISED_MII);
10300         tp->link_config.speed = SPEED_INVALID;
10301         tp->link_config.duplex = DUPLEX_INVALID;
10302         tp->link_config.autoneg = AUTONEG_ENABLE;
10303         netif_carrier_off(tp->dev);
10304         tp->link_config.active_speed = SPEED_INVALID;
10305         tp->link_config.active_duplex = DUPLEX_INVALID;
10306         tp->link_config.phy_is_low_power = 0;
10307         tp->link_config.orig_speed = SPEED_INVALID;
10308         tp->link_config.orig_duplex = DUPLEX_INVALID;
10309         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10310 }
10311
10312 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10313 {
10314         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10315                 tp->bufmgr_config.mbuf_read_dma_low_water =
10316                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10317                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10318                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10319                 tp->bufmgr_config.mbuf_high_water =
10320                         DEFAULT_MB_HIGH_WATER_5705;
10321
10322                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10323                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10324                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10325                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10326                 tp->bufmgr_config.mbuf_high_water_jumbo =
10327                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10328         } else {
10329                 tp->bufmgr_config.mbuf_read_dma_low_water =
10330                         DEFAULT_MB_RDMA_LOW_WATER;
10331                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10332                         DEFAULT_MB_MACRX_LOW_WATER;
10333                 tp->bufmgr_config.mbuf_high_water =
10334                         DEFAULT_MB_HIGH_WATER;
10335
10336                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10337                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10338                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10339                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10340                 tp->bufmgr_config.mbuf_high_water_jumbo =
10341                         DEFAULT_MB_HIGH_WATER_JUMBO;
10342         }
10343
10344         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10345         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10346 }
10347
10348 static char * __devinit tg3_phy_string(struct tg3 *tp)
10349 {
10350         switch (tp->phy_id & PHY_ID_MASK) {
10351         case PHY_ID_BCM5400:    return "5400";
10352         case PHY_ID_BCM5401:    return "5401";
10353         case PHY_ID_BCM5411:    return "5411";
10354         case PHY_ID_BCM5701:    return "5701";
10355         case PHY_ID_BCM5703:    return "5703";
10356         case PHY_ID_BCM5704:    return "5704";
10357         case PHY_ID_BCM5705:    return "5705";
10358         case PHY_ID_BCM5750:    return "5750";
10359         case PHY_ID_BCM5752:    return "5752";
10360         case PHY_ID_BCM5714:    return "5714";
10361         case PHY_ID_BCM5780:    return "5780";
10362         case PHY_ID_BCM8002:    return "8002/serdes";
10363         case 0:                 return "serdes";
10364         default:                return "unknown";
10365         };
10366 }
10367
10368 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10369 {
10370         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10371                 strcpy(str, "PCI Express");
10372                 return str;
10373         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10374                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10375
10376                 strcpy(str, "PCIX:");
10377
10378                 if ((clock_ctrl == 7) ||
10379                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10380                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10381                         strcat(str, "133MHz");
10382                 else if (clock_ctrl == 0)
10383                         strcat(str, "33MHz");
10384                 else if (clock_ctrl == 2)
10385                         strcat(str, "50MHz");
10386                 else if (clock_ctrl == 4)
10387                         strcat(str, "66MHz");
10388                 else if (clock_ctrl == 6)
10389                         strcat(str, "100MHz");
10390                 else if (clock_ctrl == 7)
10391                         strcat(str, "133MHz");
10392         } else {
10393                 strcpy(str, "PCI:");
10394                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10395                         strcat(str, "66MHz");
10396                 else
10397                         strcat(str, "33MHz");
10398         }
10399         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10400                 strcat(str, ":32-bit");
10401         else
10402                 strcat(str, ":64-bit");
10403         return str;
10404 }
10405
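      /* 5704 parts are dual-function devices; scan the other functions in
       * the same slot to find the sibling port.  The result is stored in
       * tp->pdev_peer for code that must take both ports into account.
       */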
10406 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10407 {
10408         struct pci_dev *peer;
10409         unsigned int func, devnr = tp->pdev->devfn & ~7;
10410
10411         for (func = 0; func < 8; func++) {
10412                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10413                 if (peer && peer != tp->pdev)
10414                         break;
10415                 pci_dev_put(peer);
10416         }
10417         if (!peer || peer == tp->pdev)
10418                 BUG();
10419
10420         /*
10421          * We don't need to keep the refcount elevated; there's no way
10422          * to remove one half of this device without removing the other.
10423          */
10424         pci_dev_put(peer);
10425
10426         return peer;
10427 }
10428
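      /* Fill tp->coal with the default ethtool coalescing parameters.
       * Chips using the CLRTICK host-coalescing modes get adjusted tick
       * values, and 5705-and-newer chips have the per-interrupt and
       * statistics-block coalescing fields zeroed below.
       */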
10429 static void __devinit tg3_init_coal(struct tg3 *tp)
10430 {
10431         struct ethtool_coalesce *ec = &tp->coal;
10432
10433         memset(ec, 0, sizeof(*ec));
10434         ec->cmd = ETHTOOL_GCOALESCE;
10435         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10436         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10437         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10438         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10439         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10440         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10441         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10442         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10443         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10444
10445         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10446                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10447                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10448                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10449                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10450                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10451         }
10452
10453         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10454                 ec->rx_coalesce_usecs_irq = 0;
10455                 ec->tx_coalesce_usecs_irq = 0;
10456                 ec->stats_block_coalesce_usecs = 0;
10457         }
10458 }
10459
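      /* PCI probe entry point: enable the device, map BAR 0, allocate and
       * populate the net_device, read the chip invariants, run the DMA
       * test and finally register the interface.
       */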
10460 static int __devinit tg3_init_one(struct pci_dev *pdev,
10461                                   const struct pci_device_id *ent)
10462 {
10463         static int tg3_version_printed = 0;
10464         unsigned long tg3reg_base, tg3reg_len;
10465         struct net_device *dev;
10466         struct tg3 *tp;
10467         int i, err, pci_using_dac, pm_cap;
10468         char str[40];
10469
10470         if (tg3_version_printed++ == 0)
10471                 printk(KERN_INFO "%s", version);
10472
10473         err = pci_enable_device(pdev);
10474         if (err) {
10475                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10476                        "aborting.\n");
10477                 return err;
10478         }
10479
10480         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10481                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10482                        "base address, aborting.\n");
10483                 err = -ENODEV;
10484                 goto err_out_disable_pdev;
10485         }
10486
10487         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10488         if (err) {
10489                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10490                        "aborting.\n");
10491                 goto err_out_disable_pdev;
10492         }
10493
10494         pci_set_master(pdev);
10495
10496         /* Find power-management capability. */
10497         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10498         if (pm_cap == 0) {
10499                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10500                        "aborting.\n");
10501                 err = -EIO;
10502                 goto err_out_free_res;
10503         }
10504
10505         /* Configure DMA attributes. */
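              /* Try a full 64-bit mask first (pci_using_dac = 1); fall back
               * to 32-bit addressing if the platform cannot support it.
               */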
10506         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
10507         if (!err) {
10508                 pci_using_dac = 1;
10509                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
10510                 if (err < 0) {
10511                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10512                                "for consistent allocations\n");
10513                         goto err_out_free_res;
10514                 }
10515         } else {
10516                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
10517                 if (err) {
10518                         printk(KERN_ERR PFX "No usable DMA configuration, "
10519                                "aborting.\n");
10520                         goto err_out_free_res;
10521                 }
10522                 pci_using_dac = 0;
10523         }
10524
10525         tg3reg_base = pci_resource_start(pdev, 0);
10526         tg3reg_len = pci_resource_len(pdev, 0);
10527
10528         dev = alloc_etherdev(sizeof(*tp));
10529         if (!dev) {
10530                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10531                 err = -ENOMEM;
10532                 goto err_out_free_res;
10533         }
10534
10535         SET_MODULE_OWNER(dev);
10536         SET_NETDEV_DEV(dev, &pdev->dev);
10537
10538         if (pci_using_dac)
10539                 dev->features |= NETIF_F_HIGHDMA;
10540         dev->features |= NETIF_F_LLTX;
10541 #if TG3_VLAN_TAG_USED
10542         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10543         dev->vlan_rx_register = tg3_vlan_rx_register;
10544         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10545 #endif
10546
10547         tp = netdev_priv(dev);
10548         tp->pdev = pdev;
10549         tp->dev = dev;
10550         tp->pm_cap = pm_cap;
10551         tp->mac_mode = TG3_DEF_MAC_MODE;
10552         tp->rx_mode = TG3_DEF_RX_MODE;
10553         tp->tx_mode = TG3_DEF_TX_MODE;
10554         tp->mi_mode = MAC_MI_MODE_BASE;
10555         if (tg3_debug > 0)
10556                 tp->msg_enable = tg3_debug;
10557         else
10558                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10559
10560         /* The word/byte swap controls here control register access byte
10561          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10562          * setting below.
10563          */
10564         tp->misc_host_ctrl =
10565                 MISC_HOST_CTRL_MASK_PCI_INT |
10566                 MISC_HOST_CTRL_WORD_SWAP |
10567                 MISC_HOST_CTRL_INDIR_ACCESS |
10568                 MISC_HOST_CTRL_PCISTATE_RW;
10569
10570         /* The NONFRM (non-frame) byte/word swap controls take effect
10571          * on descriptor entries, anything which isn't packet data.
10572          *
10573          * The StrongARM chips on the board (one for tx, one for rx)
10574          * are running in big-endian mode.
10575          */
10576         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10577                         GRC_MODE_WSWAP_NONFRM_DATA);
10578 #ifdef __BIG_ENDIAN
10579         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10580 #endif
10581         spin_lock_init(&tp->lock);
10582         spin_lock_init(&tp->tx_lock);
10583         spin_lock_init(&tp->indirect_lock);
10584         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10585
10586         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10587         if (!tp->regs) {
10588                 printk(KERN_ERR PFX "Cannot map device registers, "
10589                        "aborting.\n");
10590                 err = -ENOMEM;
10591                 goto err_out_free_dev;
10592         }
10593
10594         tg3_init_link_config(tp);
10595
10596         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10597         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10598         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10599
10600         dev->open = tg3_open;
10601         dev->stop = tg3_close;
10602         dev->get_stats = tg3_get_stats;
10603         dev->set_multicast_list = tg3_set_rx_mode;
10604         dev->set_mac_address = tg3_set_mac_addr;
10605         dev->do_ioctl = tg3_ioctl;
10606         dev->tx_timeout = tg3_tx_timeout;
10607         dev->poll = tg3_poll;
10608         dev->ethtool_ops = &tg3_ethtool_ops;
10609         dev->weight = 64;
10610         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10611         dev->change_mtu = tg3_change_mtu;
10612         dev->irq = pdev->irq;
10613 #ifdef CONFIG_NET_POLL_CONTROLLER
10614         dev->poll_controller = tg3_poll_controller;
10615 #endif
10616
10617         err = tg3_get_invariants(tp);
10618         if (err) {
10619                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10620                        "aborting.\n");
10621                 goto err_out_iounmap;
10622         }
10623
10624         tg3_init_bufmgr_config(tp);
10625
10626 #if TG3_TSO_SUPPORT != 0
10627         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10628                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10629         }
10630         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10631             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10632             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10633             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10634                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10635         } else {
10636                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10637         }
10638
10639         /* TSO is off by default; the user can enable it with ethtool.  */
10640 #if 0
10641         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10642                 dev->features |= NETIF_F_TSO;
10643 #endif
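              /* (Once the interface is up, TSO can be toggled from userspace,
               * e.g. "ethtool -K eth0 tso on"; the interface name here is
               * only illustrative.)
               */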
10644
10645 #endif
10646
10647         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10648             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10649             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10650                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10651                 tp->rx_pending = 63;
10652         }
10653
10654         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10655                 tp->pdev_peer = tg3_find_5704_peer(tp);
10656
10657         err = tg3_get_device_address(tp);
10658         if (err) {
10659                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10660                        "aborting.\n");
10661                 goto err_out_iounmap;
10662         }
10663
10664         /*
10665          * Reset chip in case UNDI or EFI driver did not shut it down.
10666          * DMA self test will enable WDMAC and we'll see (spurious)
10667          * pending DMA on the PCI bus at that point.
10668          */
10669         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10670             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10671                 pci_save_state(tp->pdev);
10672                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10673                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10674         }
10675
10676         err = tg3_test_dma(tp);
10677         if (err) {
10678                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10679                 goto err_out_iounmap;
10680         }
10681
10682         /* Tigon3 can only checksum-offload ipv4... and some chips have buggy
10683          * checksumming.
10684          */
10685         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10686                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10687                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10688         } else
10689                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10690
10691         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10692                 dev->features &= ~NETIF_F_HIGHDMA;
10693
10694         /* flow control autonegotiation is default behavior */
10695         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10696
10697         tg3_init_coal(tp);
10698
10699         /* Now that we have fully setup the chip, save away a snapshot
10700          * of the PCI config space.  We need to restore this after
10701          * GRC_MISC_CFG core clock resets and some resume events.
10702          */
10703         pci_save_state(tp->pdev);
10704
10705         err = register_netdev(dev);
10706         if (err) {
10707                 printk(KERN_ERR PFX "Cannot register net device, "
10708                        "aborting.\n");
10709                 goto err_out_iounmap;
10710         }
10711
10712         pci_set_drvdata(pdev, dev);
10713
10714         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10715                dev->name,
10716                tp->board_part_number,
10717                tp->pci_chip_rev_id,
10718                tg3_phy_string(tp),
10719                tg3_bus_string(tp, str),
10720                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10721
10722         for (i = 0; i < 6; i++)
10723                 printk("%2.2x%c", dev->dev_addr[i],
10724                        i == 5 ? '\n' : ':');
10725
10726         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10727                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10728                "TSOcap[%d] \n",
10729                dev->name,
10730                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10731                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10732                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10733                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10734                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10735                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10736                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10737         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10738                dev->name, tp->dma_rwctrl);
10739
10740         return 0;
10741
10742 err_out_iounmap:
10743         if (tp->regs) {
10744                 iounmap(tp->regs);
10745                 tp->regs = NULL;
10746         }
10747
10748 err_out_free_dev:
10749         free_netdev(dev);
10750
10751 err_out_free_res:
10752         pci_release_regions(pdev);
10753
10754 err_out_disable_pdev:
10755         pci_disable_device(pdev);
10756         pci_set_drvdata(pdev, NULL);
10757         return err;
10758 }
10759
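      /* PCI remove: undo tg3_init_one in reverse order -- unregister the
       * netdev, unmap the register BAR, free the net_device and release
       * the PCI resources.
       */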
10760 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10761 {
10762         struct net_device *dev = pci_get_drvdata(pdev);
10763
10764         if (dev) {
10765                 struct tg3 *tp = netdev_priv(dev);
10766
10767                 unregister_netdev(dev);
10768                 if (tp->regs) {
10769                         iounmap(tp->regs);
10770                         tp->regs = NULL;
10771                 }
10772                 free_netdev(dev);
10773                 pci_release_regions(pdev);
10774                 pci_disable_device(pdev);
10775                 pci_set_drvdata(pdev, NULL);
10776         }
10777 }
10778
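      /* Power management suspend: quiesce NAPI and the driver timer,
       * disable interrupts, halt the chip and drop to the requested PCI
       * power state.  If the power transition fails, the device is
       * re-initialized and brought back online.
       */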
10779 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10780 {
10781         struct net_device *dev = pci_get_drvdata(pdev);
10782         struct tg3 *tp = netdev_priv(dev);
10783         int err;
10784
10785         if (!netif_running(dev))
10786                 return 0;
10787
10788         tg3_netif_stop(tp);
10789
10790         del_timer_sync(&tp->timer);
10791
10792         tg3_full_lock(tp, 1);
10793         tg3_disable_ints(tp);
10794         tg3_full_unlock(tp);
10795
10796         netif_device_detach(dev);
10797
10798         tg3_full_lock(tp, 0);
10799         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10800         tg3_full_unlock(tp);
10801
10802         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10803         if (err) {
10804                 tg3_full_lock(tp, 0);
10805
10806                 tg3_init_hw(tp);
10807
10808                 tp->timer.expires = jiffies + tp->timer_offset;
10809                 add_timer(&tp->timer);
10810
10811                 netif_device_attach(dev);
10812                 tg3_netif_start(tp);
10813
10814                 tg3_full_unlock(tp);
10815         }
10816
10817         return err;
10818 }
10819
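      /* Power management resume: restore PCI config space, return the chip
       * to full power, re-initialize the hardware and restart the timer
       * and queues.
       */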
10820 static int tg3_resume(struct pci_dev *pdev)
10821 {
10822         struct net_device *dev = pci_get_drvdata(pdev);
10823         struct tg3 *tp = netdev_priv(dev);
10824         int err;
10825
10826         if (!netif_running(dev))
10827                 return 0;
10828
10829         pci_restore_state(tp->pdev);
10830
10831         err = tg3_set_power_state(tp, 0);
10832         if (err)
10833                 return err;
10834
10835         netif_device_attach(dev);
10836
10837         tg3_full_lock(tp, 0);
10838
10839         tg3_init_hw(tp);
10840
10841         tp->timer.expires = jiffies + tp->timer_offset;
10842         add_timer(&tp->timer);
10843
10844         tg3_netif_start(tp);
10845
10846         tg3_full_unlock(tp);
10847
10848         return 0;
10849 }
10850
10851 static struct pci_driver tg3_driver = {
10852         .name           = DRV_MODULE_NAME,
10853         .id_table       = tg3_pci_tbl,
10854         .probe          = tg3_init_one,
10855         .remove         = __devexit_p(tg3_remove_one),
10856         .suspend        = tg3_suspend,
10857         .resume         = tg3_resume
10858 };
10859
10860 static int __init tg3_init(void)
10861 {
10862         return pci_module_init(&tg3_driver);
10863 }
10864
10865 static void __exit tg3_cleanup(void)
10866 {
10867         pci_unregister_driver(&tg3_driver);
10868 }
10869
10870 module_init(tg3_init);
10871 module_exit(tg3_cleanup);