]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/net/tg3.c
[TG3]: Add tg3_poll_fw().
[net-next-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.65"
72 #define DRV_MODULE_RELDATE      "August 07, 2006"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place this n-ring entries value into the tp struct itself,
107  * we really want to expose these constants to GCC so that modulo et
108  * al.  operations are done with shifts and masks instead of with
109  * hw multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
127
128 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
130
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
133
134 /* number of ETHTOOL_GSTATS u64's */
135 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
136
137 #define TG3_NUM_TEST            6
138
/* Banner string printed once when the driver loads. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* message categories to enable. */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150
/* PCI vendor/device IDs this driver binds to; terminated by an empty
 * entry.  Exported to the module tools via MODULE_DEVICE_TABLE below.
 */
static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}      /* terminator */
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
213
/* Names reported for ETHTOOL_GSTRINGS; order must match the layout of
 * struct tg3_ethtool_stats (TG3_NUM_STATS entries, one per u64).
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
294
/* Names for the ethtool self-test results; order must match the order
 * in which the TG3_NUM_TEST self-tests are run.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
305
306 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
307 {
308         writel(val, tp->regs + off);
309 }
310
311 static u32 tg3_read32(struct tg3 *tp, u32 off)
312 {
313         return (readl(tp->regs + off));
314 }
315
316 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
317 {
318         unsigned long flags;
319
320         spin_lock_irqsave(&tp->indirect_lock, flags);
321         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
322         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
323         spin_unlock_irqrestore(&tp->indirect_lock, flags);
324 }
325
326 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
327 {
328         writel(val, tp->regs + off);
329         readl(tp->regs + off);
330 }
331
332 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
333 {
334         unsigned long flags;
335         u32 val;
336
337         spin_lock_irqsave(&tp->indirect_lock, flags);
338         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
339         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
340         spin_unlock_irqrestore(&tp->indirect_lock, flags);
341         return val;
342 }
343
/* Write a mailbox register while the chip is in indirect (config-space)
 * access mode.  Two mailboxes have dedicated config-space aliases and are
 * written through those; all others go through the shared REG_BASE_ADDR /
 * REG_DATA window (mailboxes sit at +0x5600 in that space).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
373
374 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
375 {
376         unsigned long flags;
377         u32 val;
378
379         spin_lock_irqsave(&tp->indirect_lock, flags);
380         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
381         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
382         spin_unlock_irqrestore(&tp->indirect_lock, flags);
383         return val;
384 }
385
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method: write, optionally delay, then read back
                 * to force the write out to the chip.
                 */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
410
411 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
412 {
413         tp->write32_mbox(tp, off, val);
414         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
415             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
416                 tp->read32_mbox(tp, off);
417 }
418
/* Post a TX producer index to its mailbox.  Chips flagged with
 * TXD_MBOX_HWBUG need the value written twice; chips flagged with
 * MBOX_WRITE_REORDER need a read-back to keep writes ordered.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
428
429 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
430 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
431 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
432 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
433 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
434
435 #define tw32(reg,val)           tp->write32(tp, reg, val)
436 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
437 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
438 #define tr32(reg)               tp->read32(tp, reg)
439
/* Write a 32-bit word into NIC on-board SRAM at @off.
 *
 * Depending on TG3_FLAG_SRAM_USE_CONFIG the access goes through the PCI
 * config-space memory window or the MMIO memory window.  In both cases
 * the window base is restored to zero afterwards, as other code depends
 * on that state.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
460
/* Read a 32-bit word from NIC on-board SRAM at @off into *val.
 *
 * Mirrors tg3_write_mem(): uses the config-space or MMIO memory window
 * depending on TG3_FLAG_SRAM_USE_CONFIG, and always restores the window
 * base address to zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
481
/* Disable chip interrupts: mask the PCI interrupt line in MISC_HOST_CTRL,
 * then write 1 to interrupt mailbox 0 (flushed) to tell the chip the host
 * is not accepting interrupts.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
488
489 static inline void tg3_cond_int(struct tg3 *tp)
490 {
491         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
492             (tp->hw_status->status & SD_STATUS_UPDATED))
493                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
494 }
495
/* Re-enable chip interrupts: clear irq_sync, unmask the PCI interrupt
 * line, ack with the last seen status tag, and force an interrupt if
 * work is already pending (tg3_cond_int).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();          /* irq_sync must be visible before ints come back */

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        /* 1-shot MSI chips get the same mailbox value written a second
         * time — presumably required to re-arm the MSI; see flag users.
         */
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
510
511 static inline unsigned int tg3_has_work(struct tg3 *tp)
512 {
513         struct tg3_hw_status *sblk = tp->hw_status;
514         unsigned int work_exists = 0;
515
516         /* check for phy events */
517         if (!(tp->tg3_flags &
518               (TG3_FLAG_USE_LINKCHG_REG |
519                TG3_FLAG_POLL_SERDES))) {
520                 if (sblk->status & SD_STATUS_LINK_CHG)
521                         work_exists = 1;
522         }
523         /* check for RX/TX work to do */
524         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
525             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
526                 work_exists = 1;
527
528         return work_exists;
529 }
530
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();       /* keep the mailbox write ordered before later MMIO */

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
551
/* Quiesce the transmit path and polling before a device reconfigure.
 * trans_start is refreshed first so the watchdog does not fire while
 * the queue is intentionally disabled.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}
558
/* Restart the transmit queue, polling, and interrupts after a
 * reconfigure (inverse of tg3_netif_stop).
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        /* Mark status as updated so the next interrupt/poll processes it. */
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
570
/* Adjust the chip core clock control register.
 *
 * 5780-class chips are skipped entirely.  5705+ parts only re-assert the
 * 625 MHz core bit when it was already set; older parts that had the
 * 44 MHz core bit set are stepped down through ALTCLK in two writes.
 * All writes use a 40 usec settle time (tw32_wait_f), as required when
 * changing clock frequencies (see _tw32_flush comment).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                return;

        orig_clock_ctrl = clock_ctrl;
        /* Keep only the CLKRUN control bits and the low 5 bits. */
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Step down via ALTCLK: 44MHZ+ALTCLK first, then ALTCLK. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
601
602 #define PHY_BUSY_LOOPS  5000
603
604 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
605 {
606         u32 frame_val;
607         unsigned int loops;
608         int ret;
609
610         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
611                 tw32_f(MAC_MI_MODE,
612                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
613                 udelay(80);
614         }
615
616         *val = 0x0;
617
618         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
619                       MI_COM_PHY_ADDR_MASK);
620         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
621                       MI_COM_REG_ADDR_MASK);
622         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
623
624         tw32_f(MAC_MI_COM, frame_val);
625
626         loops = PHY_BUSY_LOOPS;
627         while (loops != 0) {
628                 udelay(10);
629                 frame_val = tr32(MAC_MI_COM);
630
631                 if ((frame_val & MI_COM_BUSY) == 0) {
632                         udelay(5);
633                         frame_val = tr32(MAC_MI_COM);
634                         break;
635                 }
636                 loops -= 1;
637         }
638
639         ret = -EBUSY;
640         if (loops != 0) {
641                 *val = frame_val & MI_COM_DATA_MASK;
642                 ret = 0;
643         }
644
645         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
646                 tw32_f(MAC_MI_MODE, tp->mi_mode);
647                 udelay(80);
648         }
649
650         return ret;
651 }
652
653 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
654 {
655         u32 frame_val;
656         unsigned int loops;
657         int ret;
658
659         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
660                 tw32_f(MAC_MI_MODE,
661                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
662                 udelay(80);
663         }
664
665         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
666                       MI_COM_PHY_ADDR_MASK);
667         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
668                       MI_COM_REG_ADDR_MASK);
669         frame_val |= (val & MI_COM_DATA_MASK);
670         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
671
672         tw32_f(MAC_MI_COM, frame_val);
673
674         loops = PHY_BUSY_LOOPS;
675         while (loops != 0) {
676                 udelay(10);
677                 frame_val = tr32(MAC_MI_COM);
678                 if ((frame_val & MI_COM_BUSY) == 0) {
679                         udelay(5);
680                         frame_val = tr32(MAC_MI_COM);
681                         break;
682                 }
683                 loops -= 1;
684         }
685
686         ret = -EBUSY;
687         if (loops != 0)
688                 ret = 0;
689
690         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
691                 tw32_f(MAC_MI_MODE, tp->mi_mode);
692                 udelay(80);
693         }
694
695         return ret;
696 }
697
698 static void tg3_phy_set_wirespeed(struct tg3 *tp)
699 {
700         u32 val;
701
702         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
703                 return;
704
705         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
706             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
707                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
708                              (val | (1 << 15) | (1 << 4)));
709 }
710
711 static int tg3_bmcr_reset(struct tg3 *tp)
712 {
713         u32 phy_control;
714         int limit, err;
715
716         /* OK, reset it, and poll the BMCR_RESET bit until it
717          * clears or we time out.
718          */
719         phy_control = BMCR_RESET;
720         err = tg3_writephy(tp, MII_BMCR, phy_control);
721         if (err != 0)
722                 return -EBUSY;
723
724         limit = 5000;
725         while (limit--) {
726                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
727                 if (err != 0)
728                         return -EBUSY;
729
730                 if ((phy_control & BMCR_RESET) == 0) {
731                         udelay(40);
732                         break;
733                 }
734                 udelay(10);
735         }
736         if (limit <= 0)
737                 return -EBUSY;
738
739         return 0;
740 }
741
742 static int tg3_wait_macro_done(struct tg3 *tp)
743 {
744         int limit = 100;
745
746         while (limit--) {
747                 u32 tmp32;
748
749                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
750                         if ((tmp32 & 0x1000) == 0)
751                                 break;
752                 }
753         }
754         if (limit <= 0)
755                 return -EBUSY;
756
757         return 0;
758 }
759
/* Write a known test pattern into each of the four PHY DSP channels,
 * read it back, and verify it.  On a data mismatch the 0x000b / 0x4001 /
 * 0x4005 sequence is written and -EBUSY returned; when a macro operation
 * times out, *resetp is set to 1 so the caller knows to re-reset the PHY.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Select this channel's DSP block. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                /* Kick off the macro and wait for it to complete. */
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Re-select the channel, then issue the 0x0082/0x0802
                 * sequence ahead of the read-backs below.
                 */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Read back three low/high word pairs and compare against
                 * the pattern; low keeps 15 bits, high keeps 4 bits.
                 */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
825
826 static int tg3_phy_reset_chanpat(struct tg3 *tp)
827 {
828         int chan;
829
830         for (chan = 0; chan < 4; chan++) {
831                 int i;
832
833                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
834                              (chan * 0x2000) | 0x0200);
835                 tg3_writephy(tp, 0x16, 0x0002);
836                 for (i = 0; i < 6; i++)
837                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
838                 tg3_writephy(tp, 0x16, 0x0202);
839                 if (tg3_wait_macro_done(tp))
840                         return -EBUSY;
841         }
842
843         return 0;
844 }
845
846 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
847 {
848         u32 reg32, phy9_orig;
849         int retries, do_phy_reset, err;
850
851         retries = 10;
852         do_phy_reset = 1;
853         do {
854                 if (do_phy_reset) {
855                         err = tg3_bmcr_reset(tp);
856                         if (err)
857                                 return err;
858                         do_phy_reset = 0;
859                 }
860
861                 /* Disable transmitter and interrupt.  */
862                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
863                         continue;
864
865                 reg32 |= 0x3000;
866                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
867
868                 /* Set full-duplex, 1000 mbps.  */
869                 tg3_writephy(tp, MII_BMCR,
870                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
871
872                 /* Set to master mode.  */
873                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
874                         continue;
875
876                 tg3_writephy(tp, MII_TG3_CTRL,
877                              (MII_TG3_CTRL_AS_MASTER |
878                               MII_TG3_CTRL_ENABLE_AS_MASTER));
879
880                 /* Enable SM_DSP_CLOCK and 6dB.  */
881                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
882
883                 /* Block the PHY control access.  */
884                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
885                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
886
887                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
888                 if (!err)
889                         break;
890         } while (--retries);
891
892         err = tg3_phy_reset_chanpat(tp);
893         if (err)
894                 return err;
895
896         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
897         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
898
899         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
900         tg3_writephy(tp, 0x16, 0x0000);
901
902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
903             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
904                 /* Set Extended packet length bit for jumbo frames */
905                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
906         }
907         else {
908                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
909         }
910
911         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
912
913         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
914                 reg32 &= ~0x3000;
915                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
916         } else if (!err)
917                 err = -EBUSY;
918
919         return err;
920 }
921
922 static void tg3_link_report(struct tg3 *);
923
/* Reset the tigon3 copper PHY, then re-apply the chip-specific PHY
 * workarounds that a reset wipes out.  Drops and reports carrier if
 * the link was up.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR link status is latched-low; read twice so the second
	 * read reflects the current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset drops the link; tell the stack now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the heavyweight reset-plus-DSP-testpat
	 * sequence instead of a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* The magic DSP register values below come from Broadcom errata
	 * workarounds for specific PHY bugs; taken as-is from the
	 * vendor driver.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* NOTE(review): the duplicated write appears intentional
		 * in the vendor driver; keep both.
		 */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1012
/* Drive the GRC local-control GPIOs that switch the NIC between main
 * and auxiliary power so that Vaux stays available when this port --
 * or its peer on dual-port 5704/5714 boards -- needs it for WOL or
 * ASF.  No-op on EEPROM write-protected (LOM) configurations, which
 * manage power themselves.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	/* On dual-port chips both ports share the power GPIOs, so the
	 * peer's WOL/ASF needs must be considered too.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* Aux power is needed: sequence the GPIOs to switch the
		 * board onto Vaux.  The tw32_wait_f steps are strictly
		 * ordered; do not reorder them.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer port already completed init, it owns
			 * the GPIO sequencing; leave it alone.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Aux power not needed: release the GPIOs (5700/5701 need
		 * no action here).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1108
1109 static int tg3_setup_phy(struct tg3 *, int);
1110
1111 #define RESET_KIND_SHUTDOWN     0
1112 #define RESET_KIND_INIT         1
1113 #define RESET_KIND_SUSPEND      2
1114
1115 static void tg3_write_sig_post_reset(struct tg3 *, int);
1116 static int tg3_halt_cpu(struct tg3 *, u32);
1117 static int tg3_nvram_lock(struct tg3 *);
1118 static void tg3_nvram_unlock(struct tg3 *);
1119
1120 static void tg3_power_down_phy(struct tg3 *tp)
1121 {
1122         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
1123                 return;
1124
1125         tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1126         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1127
1128         /* The PHY should not be powered down on some chips because
1129          * of bugs.
1130          */
1131         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1132             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1133             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1134              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1135                 return;
1136         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1137 }
1138
/* Transition the device into the requested PCI power state.  For D0
 * this just clears the state bits and switches off Vaux; for D1/D2/
 * D3hot it downshifts the link, signals the firmware, arms WOL if
 * enabled, gates the chip clocks, powers down the PHY where safe and
 * finally writes the new state into PCI_PM_CTRL.  The sequence is
 * strictly ordered; returns 0 on success or -EINVAL for an unknown
 * state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Read-modify-write the PM control register: clear any pending
	 * PME status and the old state bits.
	 */
	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is being put to sleep. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Downshift copper links to 10/half to minimize power draw. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	/* Without ASF, wait (up to ~200ms) for on-chip firmware to
	 * signal readiness via the ASF status mailbox.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
					     WOL_DRV_STATE_SHUTDOWN |
					     WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	/* Configure the MAC so magic packets can still be received
	 * while the rest of the chip sleeps.
	 */
	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate the chip clocks as far as the chip family allows. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step clock switch; both writes are required. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down when nothing needs it awake. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1345
1346 static void tg3_link_report(struct tg3 *tp)
1347 {
1348         if (!netif_carrier_ok(tp->dev)) {
1349                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1350         } else {
1351                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1352                        tp->dev->name,
1353                        (tp->link_config.active_speed == SPEED_1000 ?
1354                         1000 :
1355                         (tp->link_config.active_speed == SPEED_100 ?
1356                          100 : 10)),
1357                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1358                         "full" : "half"));
1359
1360                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1361                        "%s for RX.\n",
1362                        tp->dev->name,
1363                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1364                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1365         }
1366 }
1367
1368 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1369 {
1370         u32 new_tg3_flags = 0;
1371         u32 old_rx_mode = tp->rx_mode;
1372         u32 old_tx_mode = tp->tx_mode;
1373
1374         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1375
1376                 /* Convert 1000BaseX flow control bits to 1000BaseT
1377                  * bits before resolving flow control.
1378                  */
1379                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1380                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1381                                        ADVERTISE_PAUSE_ASYM);
1382                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1383
1384                         if (local_adv & ADVERTISE_1000XPAUSE)
1385                                 local_adv |= ADVERTISE_PAUSE_CAP;
1386                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1387                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1388                         if (remote_adv & LPA_1000XPAUSE)
1389                                 remote_adv |= LPA_PAUSE_CAP;
1390                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1391                                 remote_adv |= LPA_PAUSE_ASYM;
1392                 }
1393
1394                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1395                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1396                                 if (remote_adv & LPA_PAUSE_CAP)
1397                                         new_tg3_flags |=
1398                                                 (TG3_FLAG_RX_PAUSE |
1399                                                 TG3_FLAG_TX_PAUSE);
1400                                 else if (remote_adv & LPA_PAUSE_ASYM)
1401                                         new_tg3_flags |=
1402                                                 (TG3_FLAG_RX_PAUSE);
1403                         } else {
1404                                 if (remote_adv & LPA_PAUSE_CAP)
1405                                         new_tg3_flags |=
1406                                                 (TG3_FLAG_RX_PAUSE |
1407                                                 TG3_FLAG_TX_PAUSE);
1408                         }
1409                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1410                         if ((remote_adv & LPA_PAUSE_CAP) &&
1411                         (remote_adv & LPA_PAUSE_ASYM))
1412                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1413                 }
1414
1415                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1416                 tp->tg3_flags |= new_tg3_flags;
1417         } else {
1418                 new_tg3_flags = tp->tg3_flags;
1419         }
1420
1421         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1422                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1423         else
1424                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1425
1426         if (old_rx_mode != tp->rx_mode) {
1427                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1428         }
1429
1430         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1431                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1432         else
1433                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1434
1435         if (old_tx_mode != tp->tx_mode) {
1436                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1437         }
1438 }
1439
1440 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1441 {
1442         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1443         case MII_TG3_AUX_STAT_10HALF:
1444                 *speed = SPEED_10;
1445                 *duplex = DUPLEX_HALF;
1446                 break;
1447
1448         case MII_TG3_AUX_STAT_10FULL:
1449                 *speed = SPEED_10;
1450                 *duplex = DUPLEX_FULL;
1451                 break;
1452
1453         case MII_TG3_AUX_STAT_100HALF:
1454                 *speed = SPEED_100;
1455                 *duplex = DUPLEX_HALF;
1456                 break;
1457
1458         case MII_TG3_AUX_STAT_100FULL:
1459                 *speed = SPEED_100;
1460                 *duplex = DUPLEX_FULL;
1461                 break;
1462
1463         case MII_TG3_AUX_STAT_1000HALF:
1464                 *speed = SPEED_1000;
1465                 *duplex = DUPLEX_HALF;
1466                 break;
1467
1468         case MII_TG3_AUX_STAT_1000FULL:
1469                 *speed = SPEED_1000;
1470                 *duplex = DUPLEX_FULL;
1471                 break;
1472
1473         default:
1474                 *speed = SPEED_INVALID;
1475                 *duplex = DUPLEX_INVALID;
1476                 break;
1477         };
1478 }
1479
1480 static void tg3_phy_copper_begin(struct tg3 *tp)
1481 {
1482         u32 new_adv;
1483         int i;
1484
1485         if (tp->link_config.phy_is_low_power) {
1486                 /* Entering low power mode.  Disable gigabit and
1487                  * 100baseT advertisements.
1488                  */
1489                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1490
1491                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1492                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1493                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1494                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1495
1496                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1497         } else if (tp->link_config.speed == SPEED_INVALID) {
1498                 tp->link_config.advertising =
1499                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1500                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1501                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1502                          ADVERTISED_Autoneg | ADVERTISED_MII);
1503
1504                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1505                         tp->link_config.advertising &=
1506                                 ~(ADVERTISED_1000baseT_Half |
1507                                   ADVERTISED_1000baseT_Full);
1508
1509                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1510                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1511                         new_adv |= ADVERTISE_10HALF;
1512                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1513                         new_adv |= ADVERTISE_10FULL;
1514                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1515                         new_adv |= ADVERTISE_100HALF;
1516                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1517                         new_adv |= ADVERTISE_100FULL;
1518                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1519
1520                 if (tp->link_config.advertising &
1521                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1522                         new_adv = 0;
1523                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1524                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1525                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1526                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1527                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1528                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1529                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1530                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1531                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1532                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1533                 } else {
1534                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1535                 }
1536         } else {
1537                 /* Asking for a specific link mode. */
1538                 if (tp->link_config.speed == SPEED_1000) {
1539                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1540                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1541
1542                         if (tp->link_config.duplex == DUPLEX_FULL)
1543                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1544                         else
1545                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1546                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1547                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1548                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1549                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1550                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1551                 } else {
1552                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1553
1554                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1555                         if (tp->link_config.speed == SPEED_100) {
1556                                 if (tp->link_config.duplex == DUPLEX_FULL)
1557                                         new_adv |= ADVERTISE_100FULL;
1558                                 else
1559                                         new_adv |= ADVERTISE_100HALF;
1560                         } else {
1561                                 if (tp->link_config.duplex == DUPLEX_FULL)
1562                                         new_adv |= ADVERTISE_10FULL;
1563                                 else
1564                                         new_adv |= ADVERTISE_10HALF;
1565                         }
1566                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1567                 }
1568         }
1569
1570         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1571             tp->link_config.speed != SPEED_INVALID) {
1572                 u32 bmcr, orig_bmcr;
1573
1574                 tp->link_config.active_speed = tp->link_config.speed;
1575                 tp->link_config.active_duplex = tp->link_config.duplex;
1576
1577                 bmcr = 0;
1578                 switch (tp->link_config.speed) {
1579                 default:
1580                 case SPEED_10:
1581                         break;
1582
1583                 case SPEED_100:
1584                         bmcr |= BMCR_SPEED100;
1585                         break;
1586
1587                 case SPEED_1000:
1588                         bmcr |= TG3_BMCR_SPEED1000;
1589                         break;
1590                 };
1591
1592                 if (tp->link_config.duplex == DUPLEX_FULL)
1593                         bmcr |= BMCR_FULLDPLX;
1594
1595                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1596                     (bmcr != orig_bmcr)) {
1597                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1598                         for (i = 0; i < 1500; i++) {
1599                                 u32 tmp;
1600
1601                                 udelay(10);
1602                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1603                                     tg3_readphy(tp, MII_BMSR, &tmp))
1604                                         continue;
1605                                 if (!(tmp & BMSR_LSTATUS)) {
1606                                         udelay(40);
1607                                         break;
1608                                 }
1609                         }
1610                         tg3_writephy(tp, MII_BMCR, bmcr);
1611                         udelay(40);
1612                 }
1613         } else {
1614                 tg3_writephy(tp, MII_BMCR,
1615                              BMCR_ANENABLE | BMCR_ANRESTART);
1616         }
1617 }
1618
1619 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1620 {
1621         int err;
1622
1623         /* Turn off tap power management. */
1624         /* Set Extended packet length bit */
1625         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1626
1627         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1628         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1629
1630         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1631         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1632
1633         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1634         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1635
1636         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1637         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1638
1639         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1640         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1641
1642         udelay(40);
1643
1644         return err;
1645 }
1646
1647 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1648 {
1649         u32 adv_reg, all_mask;
1650
1651         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1652                 return 0;
1653
1654         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1655                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1656         if ((adv_reg & all_mask) != all_mask)
1657                 return 0;
1658         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1659                 u32 tg3_ctrl;
1660
1661                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1662                         return 0;
1663
1664                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1665                             MII_TG3_CTRL_ADV_1000_FULL);
1666                 if ((tg3_ctrl & all_mask) != all_mask)
1667                         return 0;
1668         }
1669         return 1;
1670 }
1671
/* Bring up / re-evaluate the link on a copper (10/100/1000BASE-T) PHY.
 *
 * Applies per-chip PHY workarounds, reads the negotiated (or forced)
 * link state from the PHY, programs MAC_MODE to match, configures flow
 * control, and propagates carrier changes to the networking core.
 *
 * @tp: device private state
 * @force_reset: non-zero to unconditionally reset the PHY first
 *
 * Returns 0, or a PHY access error from the 5401 DSP init path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, dummy;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        /* Mask MAC events and ack any stale link-state attention bits
         * before touching the PHY.
         */
        tw32(MAC_EVENT, 0);

        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        /* Put the MDIO interface into base (non-auto-poll) mode so the
         * tg3_readphy/tg3_writephy calls below own the bus.
         */
        tp->mi_mode = MAC_MI_MODE_BASE;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

        /* Some third-party PHYs need to be reset on link going
         * down.  BMSR is read twice because the link-status bit is
         * latched-low; the second read reflects the current state.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
                        bmsr = 0;

                if (!(bmsr & BMSR_LSTATUS)) {
                        /* No link: (re)load the 5401 DSP workaround code
                         * and poll up to ~10ms for link to return.
                         */
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        /* 5401 rev B0 at gigabit may additionally need a
                         * full PHY reset plus another DSP reload.
                         */
                        if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, 0x1c, 0x8c68);
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8c68);
        }

        /* Clear pending interrupts... (read interrupt status twice) */
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

        /* Unmask only the link-change interrupt when the MI interrupt
         * is in use; otherwise mask every PHY interrupt source.
         */
        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        /* Select the PHY LED mode on 5700/5701. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
                u32 val;

                /* Ensure bit 10 is set via the 0x4007 AUX_CTRL access;
                 * if it was clear, set it and force a relink so the
                 * change takes effect.
                 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
                tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
                if (!(val & (1 << 10))) {
                        val |= (1 << 10);
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
                        goto relink;
                }
        }

        /* Poll up to ~4ms for link-up (BMSR double-read: latched bit). */
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                /* Wait up to ~20ms for AUX_STAT to report non-zero, then
                 * decode the negotiated speed/duplex from it.
                 */
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                /* Retry BMCR until it reads back neither 0 nor 0x7fff
                 * (apparently transient garbage values).
                 */
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if (bmcr & BMCR_ANENABLE) {
                                current_link_up = 1;

                                /* Force autoneg restart if we are exiting
                                 * low power mode.
                                 */
                                if (!tg3_copper_is_advertising_all(tp))
                                        current_link_up = 0;
                        } else {
                                current_link_up = 0;
                        }
                } else {
                        /* Forced mode: the link only counts if the PHY is
                         * at exactly the requested speed and duplex.
                         */
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex) {
                                current_link_up = 1;
                        } else {
                                current_link_up = 0;
                        }
                }

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;
        }

        /* With autoneg full-duplex link up, verify pause advertisement
         * and program flow control from the local/remote pause bits.
         */
        if (current_link_up == 1 &&
            (tp->link_config.active_duplex == DUPLEX_FULL) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 local_adv, remote_adv;

                if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
                        local_adv = 0;
                local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

                if (tg3_readphy(tp, MII_LPA, &remote_adv))
                        remote_adv = 0;

                remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

                /* If we are not advertising full pause capability,
                 * something is wrong.  Bring the link down and reconfigure.
                 */
                if (local_adv != ADVERTISE_PAUSE_CAP) {
                        current_link_up = 0;
                } else {
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                }
        }
relink:
        if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
                u32 tmp;

                /* Reprogram advertisement / forced mode, then check
                 * whether link came back (BMSR read twice: latched bit).
                 */
                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &tmp);
                if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
                    (tmp & BMSR_LSTATUS))
                        current_link_up = 1;
        }

        /* Program the MAC port mode: MII for 10/100, GMII otherwise
         * (GMII is also the no-link default).
         */
        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        /* Link polarity depends on chip rev and (on 5700) LED mode. */
        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
                    (current_link_up == 1 &&
                     tp->link_config.active_speed == SPEED_10))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
        } else {
                if (current_link_up == 1)
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        /* 5700 at gigabit on PCI-X / high-speed PCI: ack the status
         * change bits and write the MAGIC2 value into the firmware
         * mailbox in NIC SRAM.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
             (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Propagate a carrier change to the stack and log it. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}
1950
/* Software context for the 1000BASE-X (fiber) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine() (cf. IEEE 802.3
 * clause 37 arbitration).
 */
struct tg3_fiber_aneginfo {
        int state;      /* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;      /* MR_* control and result bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
/* MR_LP_ADV_*: abilities decoded from the link partner's config word. */
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Tick counters; cur_time advances once per state-machine call,
         * link_time records when a settle period started.
         */
        unsigned long link_time, cur_time;

        u32 ability_match_cfg;          /* last rx config word sampled */
        int ability_match_count;        /* consecutive identical samples */

        /* Detector outputs, recomputed on every tick. */
        char ability_match, idle_match, ack_match;

        /* Raw transmitted / received configuration words (ANEG_CFG_*
         * bit positions within those words).
         */
        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Settle period, in state-machine ticks. */
#define ANEG_STATE_SETTLE_TIME  10000
2014
2015 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2016                                    struct tg3_fiber_aneginfo *ap)
2017 {
2018         unsigned long delta;
2019         u32 rx_cfg_reg;
2020         int ret;
2021
2022         if (ap->state == ANEG_STATE_UNKNOWN) {
2023                 ap->rxconfig = 0;
2024                 ap->link_time = 0;
2025                 ap->cur_time = 0;
2026                 ap->ability_match_cfg = 0;
2027                 ap->ability_match_count = 0;
2028                 ap->ability_match = 0;
2029                 ap->idle_match = 0;
2030                 ap->ack_match = 0;
2031         }
2032         ap->cur_time++;
2033
2034         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2035                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2036
2037                 if (rx_cfg_reg != ap->ability_match_cfg) {
2038                         ap->ability_match_cfg = rx_cfg_reg;
2039                         ap->ability_match = 0;
2040                         ap->ability_match_count = 0;
2041                 } else {
2042                         if (++ap->ability_match_count > 1) {
2043                                 ap->ability_match = 1;
2044                                 ap->ability_match_cfg = rx_cfg_reg;
2045                         }
2046                 }
2047                 if (rx_cfg_reg & ANEG_CFG_ACK)
2048                         ap->ack_match = 1;
2049                 else
2050                         ap->ack_match = 0;
2051
2052                 ap->idle_match = 0;
2053         } else {
2054                 ap->idle_match = 1;
2055                 ap->ability_match_cfg = 0;
2056                 ap->ability_match_count = 0;
2057                 ap->ability_match = 0;
2058                 ap->ack_match = 0;
2059
2060                 rx_cfg_reg = 0;
2061         }
2062
2063         ap->rxconfig = rx_cfg_reg;
2064         ret = ANEG_OK;
2065
2066         switch(ap->state) {
2067         case ANEG_STATE_UNKNOWN:
2068                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2069                         ap->state = ANEG_STATE_AN_ENABLE;
2070
2071                 /* fallthru */
2072         case ANEG_STATE_AN_ENABLE:
2073                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2074                 if (ap->flags & MR_AN_ENABLE) {
2075                         ap->link_time = 0;
2076                         ap->cur_time = 0;
2077                         ap->ability_match_cfg = 0;
2078                         ap->ability_match_count = 0;
2079                         ap->ability_match = 0;
2080                         ap->idle_match = 0;
2081                         ap->ack_match = 0;
2082
2083                         ap->state = ANEG_STATE_RESTART_INIT;
2084                 } else {
2085                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2086                 }
2087                 break;
2088
2089         case ANEG_STATE_RESTART_INIT:
2090                 ap->link_time = ap->cur_time;
2091                 ap->flags &= ~(MR_NP_LOADED);
2092                 ap->txconfig = 0;
2093                 tw32(MAC_TX_AUTO_NEG, 0);
2094                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2095                 tw32_f(MAC_MODE, tp->mac_mode);
2096                 udelay(40);
2097
2098                 ret = ANEG_TIMER_ENAB;
2099                 ap->state = ANEG_STATE_RESTART;
2100
2101                 /* fallthru */
2102         case ANEG_STATE_RESTART:
2103                 delta = ap->cur_time - ap->link_time;
2104                 if (delta > ANEG_STATE_SETTLE_TIME) {
2105                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2106                 } else {
2107                         ret = ANEG_TIMER_ENAB;
2108                 }
2109                 break;
2110
2111         case ANEG_STATE_DISABLE_LINK_OK:
2112                 ret = ANEG_DONE;
2113                 break;
2114
2115         case ANEG_STATE_ABILITY_DETECT_INIT:
2116                 ap->flags &= ~(MR_TOGGLE_TX);
2117                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2118                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2119                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2120                 tw32_f(MAC_MODE, tp->mac_mode);
2121                 udelay(40);
2122
2123                 ap->state = ANEG_STATE_ABILITY_DETECT;
2124                 break;
2125
2126         case ANEG_STATE_ABILITY_DETECT:
2127                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2128                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2129                 }
2130                 break;
2131
2132         case ANEG_STATE_ACK_DETECT_INIT:
2133                 ap->txconfig |= ANEG_CFG_ACK;
2134                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2135                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2136                 tw32_f(MAC_MODE, tp->mac_mode);
2137                 udelay(40);
2138
2139                 ap->state = ANEG_STATE_ACK_DETECT;
2140
2141                 /* fallthru */
2142         case ANEG_STATE_ACK_DETECT:
2143                 if (ap->ack_match != 0) {
2144                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2145                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2146                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2147                         } else {
2148                                 ap->state = ANEG_STATE_AN_ENABLE;
2149                         }
2150                 } else if (ap->ability_match != 0 &&
2151                            ap->rxconfig == 0) {
2152                         ap->state = ANEG_STATE_AN_ENABLE;
2153                 }
2154                 break;
2155
2156         case ANEG_STATE_COMPLETE_ACK_INIT:
2157                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2158                         ret = ANEG_FAILED;
2159                         break;
2160                 }
2161                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2162                                MR_LP_ADV_HALF_DUPLEX |
2163                                MR_LP_ADV_SYM_PAUSE |
2164                                MR_LP_ADV_ASYM_PAUSE |
2165                                MR_LP_ADV_REMOTE_FAULT1 |
2166                                MR_LP_ADV_REMOTE_FAULT2 |
2167                                MR_LP_ADV_NEXT_PAGE |
2168                                MR_TOGGLE_RX |
2169                                MR_NP_RX);
2170                 if (ap->rxconfig & ANEG_CFG_FD)
2171                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2172                 if (ap->rxconfig & ANEG_CFG_HD)
2173                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2174                 if (ap->rxconfig & ANEG_CFG_PS1)
2175                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2176                 if (ap->rxconfig & ANEG_CFG_PS2)
2177                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2178                 if (ap->rxconfig & ANEG_CFG_RF1)
2179                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2180                 if (ap->rxconfig & ANEG_CFG_RF2)
2181                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2182                 if (ap->rxconfig & ANEG_CFG_NP)
2183                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2184
2185                 ap->link_time = ap->cur_time;
2186
2187                 ap->flags ^= (MR_TOGGLE_TX);
2188                 if (ap->rxconfig & 0x0008)
2189                         ap->flags |= MR_TOGGLE_RX;
2190                 if (ap->rxconfig & ANEG_CFG_NP)
2191                         ap->flags |= MR_NP_RX;
2192                 ap->flags |= MR_PAGE_RX;
2193
2194                 ap->state = ANEG_STATE_COMPLETE_ACK;
2195                 ret = ANEG_TIMER_ENAB;
2196                 break;
2197
2198         case ANEG_STATE_COMPLETE_ACK:
2199                 if (ap->ability_match != 0 &&
2200                     ap->rxconfig == 0) {
2201                         ap->state = ANEG_STATE_AN_ENABLE;
2202                         break;
2203                 }
2204                 delta = ap->cur_time - ap->link_time;
2205                 if (delta > ANEG_STATE_SETTLE_TIME) {
2206                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2207                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2208                         } else {
2209                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2210                                     !(ap->flags & MR_NP_RX)) {
2211                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2212                                 } else {
2213                                         ret = ANEG_FAILED;
2214                                 }
2215                         }
2216                 }
2217                 break;
2218
2219         case ANEG_STATE_IDLE_DETECT_INIT:
2220                 ap->link_time = ap->cur_time;
2221                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2222                 tw32_f(MAC_MODE, tp->mac_mode);
2223                 udelay(40);
2224
2225                 ap->state = ANEG_STATE_IDLE_DETECT;
2226                 ret = ANEG_TIMER_ENAB;
2227                 break;
2228
2229         case ANEG_STATE_IDLE_DETECT:
2230                 if (ap->ability_match != 0 &&
2231                     ap->rxconfig == 0) {
2232                         ap->state = ANEG_STATE_AN_ENABLE;
2233                         break;
2234                 }
2235                 delta = ap->cur_time - ap->link_time;
2236                 if (delta > ANEG_STATE_SETTLE_TIME) {
2237                         /* XXX another gem from the Broadcom driver :( */
2238                         ap->state = ANEG_STATE_LINK_OK;
2239                 }
2240                 break;
2241
2242         case ANEG_STATE_LINK_OK:
2243                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2244                 ret = ANEG_DONE;
2245                 break;
2246
2247         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2248                 /* ??? unimplemented */
2249                 break;
2250
2251         case ANEG_STATE_NEXT_PAGE_WAIT:
2252                 /* ??? unimplemented */
2253                 break;
2254
2255         default:
2256                 ret = ANEG_FAILED;
2257                 break;
2258         };
2259
2260         return ret;
2261 }
2262
2263 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2264 {
2265         int res = 0;
2266         struct tg3_fiber_aneginfo aninfo;
2267         int status = ANEG_FAILED;
2268         unsigned int tick;
2269         u32 tmp;
2270
2271         tw32_f(MAC_TX_AUTO_NEG, 0);
2272
2273         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2274         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2275         udelay(40);
2276
2277         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2278         udelay(40);
2279
2280         memset(&aninfo, 0, sizeof(aninfo));
2281         aninfo.flags |= MR_AN_ENABLE;
2282         aninfo.state = ANEG_STATE_UNKNOWN;
2283         aninfo.cur_time = 0;
2284         tick = 0;
2285         while (++tick < 195000) {
2286                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2287                 if (status == ANEG_DONE || status == ANEG_FAILED)
2288                         break;
2289
2290                 udelay(1);
2291         }
2292
2293         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2294         tw32_f(MAC_MODE, tp->mac_mode);
2295         udelay(40);
2296
2297         *flags = aninfo.flags;
2298
2299         if (status == ANEG_DONE &&
2300             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2301                              MR_LP_ADV_FULL_DUPLEX)))
2302                 res = 1;
2303
2304         return res;
2305 }
2306
/* Initialize the BCM8002 SerDes PHY.
 *
 * Runs only on first init or while PCS sync is present (see guard
 * below).  The raw register numbers and values are vendor-provided
 * magic; the write order and delays must be preserved as-is.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete (~5ms busy-wait). */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize (~150ms busy-wait). */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
2356
/* Fiber link setup using the on-chip SG_DIG hardware autonegotiation
 * block.  Handles both forced mode (autoneg disabled: program SG_DIG
 * for fixed operation) and hardware autoneg, including a
 * parallel-detection fallback when we have PCS sync but the peer is
 * not sending config code words.
 *
 * @mac_status: MAC_STATUS snapshot taken by the caller.
 * Returns nonzero when the link should be considered up.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revisions except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if SG_DIG autoneg (bit 31, presumably the
		 * enable bit — see expected_sg_dig_ctrl below) is on,
		 * turn it off.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* While a parallel-detect link is still counting down and
		 * we have PCS sync without received configs, keep the link
		 * up rather than restarting autoneg immediately.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse bit 30 to kick off a fresh autonegotiation. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* SG_DIG_STATUS bit 1 appears to be "autoneg complete";
		 * bits 19/20 carry the peer's pause advertisement.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg not complete: count down, then fall back
			 * to parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync/signal at all: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2493
/* Fiber link setup for chips without the SG_DIG hardware autoneg
 * block: run the software autoneg state machine (fiber_autoneg), or
 * force a 1000FD link when autoneg is disabled.
 *
 * @mac_status: MAC_STATUS snapshot taken by the caller.
 * Returns nonzero when the link should be considered up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is no link to negotiate. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate the aneg result into MII-style pause
			 * advertisement bits for tg3_setup_flow_control().
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack SYNC/CFG changed bits until they stay clear
		 * (bounded at 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we have sync and no incoming config
		 * code words: treat the link as up (parallel detection).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2551
/* Top-level link setup for TBI (fiber) ports.  Puts the MAC in TBI
 * mode, runs either the hardware (SG_DIG) or software autoneg path,
 * then updates carrier state, LEDs and link_config to match the
 * result.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember pause/speed/duplex so we can report a change even
	 * when the carrier state itself does not flip.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg chip with an established link and
	 * nothing changed in MAC_STATUS — just ack and return.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the stale link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack change bits until they stay clear (bounded at 100 tries). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: pulse SEND_CONFIGS to
		 * try to provoke the peer.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000FD when up; drive the LEDs to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged, but report if pause/speed/duplex did. */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2667
/* Link setup for fiber ports driven through an MII-accessible SERDES
 * PHY (e.g. 5714S).  Handles autoneg (with parallel-detect state kept
 * in TG3_FLG2_PARALLEL_DETECT), and forced mode.  Updates carrier
 * state and link_config.  Returns the OR of all tg3_readphy() error
 * codes encountered.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status change bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice for the current
	 * state.  On 5714 trust the MAC's TX status link bit instead.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000BASE-X advertisement from link_config. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Advertisement changed (or aneg was off): restart aneg
		 * and return early; the timer path finishes the job.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: only duplex is configurable on fiber. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read latched-low link status (twice). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex and flow control from the
			 * intersection of both advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): this reads active_duplex *before* it is updated
	 * from current_duplex below, so MAC_MODE_HALF_DUPLEX reflects the
	 * previous link's duplex on the first pass after a change —
	 * confirm this is intentional.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2834
/* Periodic (timer-driven) parallel-detection helper for MII SERDES
 * ports.  While autoneg is enabled and no carrier, force the link up
 * by parallel detection when we see signal detect but no incoming
 * config code words; conversely, when a parallel-detected link starts
 * receiving config code words, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			/* phy1 bit 4: signal detect; phy2 bit 5: config
			 * code words being received (presumed from usage
			 * here and in the branch below).
			 */
			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2892
2893 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2894 {
2895         int err;
2896
2897         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2898                 err = tg3_setup_fiber_phy(tp, force_reset);
2899         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2900                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2901         } else {
2902                 err = tg3_setup_copper_phy(tp, force_reset);
2903         }
2904
2905         if (tp->link_config.active_speed == SPEED_1000 &&
2906             tp->link_config.active_duplex == DUPLEX_HALF)
2907                 tw32(MAC_TX_LENGTHS,
2908                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2909                       (6 << TX_LENGTHS_IPG_SHIFT) |
2910                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2911         else
2912                 tw32(MAC_TX_LENGTHS,
2913                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2914                       (6 << TX_LENGTHS_IPG_SHIFT) |
2915                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2916
2917         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2918                 if (netif_carrier_ok(tp->dev)) {
2919                         tw32(HOSTCC_STAT_COAL_TICKS,
2920                              tp->coal.stats_block_coalesce_usecs);
2921                 } else {
2922                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2923                 }
2924         }
2925
2926         return err;
2927 }
2928
2929 /* This is called whenever we suspect that the system chipset is re-
2930  * ordering the sequence of MMIO to the tx send mailbox. The symptom
2931  * is bogus tx completions. We try to recover by setting the
2932  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2933  * in the workqueue.
2934  */
2935 static void tg3_tx_recover(struct tg3 *tp)
2936 {
2937         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2938                tp->write32_tx_mbox == tg3_write_indirect_mbox);
2939
2940         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2941                "mapped I/O cycles to the network device, attempting to "
2942                "recover. Please report the problem to the driver maintainer "
2943                "and include system chipset information.\n", tp->dev->name);
2944
2945         spin_lock(&tp->lock);
2946         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
2947         spin_unlock(&tp->lock);
2948 }
2949
2950 static inline u32 tg3_tx_avail(struct tg3 *tp)
2951 {
2952         smp_mb();
2953         return (tp->tx_pending -
2954                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
2955 }
2956
2957 /* Tigon3 never reports partial packet sends.  So we do not
2958  * need special logic to handle SKBs that have not had all
2959  * of their frags sent yet, like SunGEM does.
2960  */
static void tg3_tx(struct tg3 *tp)
{
	/* Consume completed TX descriptors from tx_cons up to the
	 * hardware's consumer index in the status block.
	 */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the hardware and
		 * software views of the ring disagree: trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor maps the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* One additional descriptor per page fragment; note any
		 * inconsistency but finish unmapping before recovering.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with a
	 * concurrent tg3_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3024
3025 /* Returns size of skb allocated or < 0 on error.
3026  *
3027  * We only need to fill in the address because the other members
3028  * of the RX descriptor are invariant, see tg3_init_rings.
3029  *
3030  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3031  * posting buffers we only dirty the first cache line of the RX
3032  * descriptor (containing the address).  Whereas for the RX status
3033  * buffers the cpu only reads the last cacheline of the RX descriptor
3034  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3035  */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Resolve ring, buffer-info array and buffer size from the
	 * opaque ring key (standard vs jumbo).
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	/* NOTE(review): the DMA mapping result is not checked for a
	 * mapping failure before being written to the descriptor —
	 * confirm this is acceptable on all supported platforms.
	 */
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	/* Commit point: record the new skb/mapping, drop the source
	 * slot's reference (buffer moved, not copied).
	 */
	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the address words of the descriptor ever change; see
	 * the comment above this function.
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3096
3097 /* We only need to move over in the address because the other
3098  * members of the RX descriptor are invariant.  See notes above
3099  * tg3_alloc_rx_skb for full details.
3100  */
3101 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3102                            int src_idx, u32 dest_idx_unmasked)
3103 {
3104         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3105         struct ring_info *src_map, *dest_map;
3106         int dest_idx;
3107
3108         switch (opaque_key) {
3109         case RXD_OPAQUE_RING_STD:
3110                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3111                 dest_desc = &tp->rx_std[dest_idx];
3112                 dest_map = &tp->rx_std_buffers[dest_idx];
3113                 src_desc = &tp->rx_std[src_idx];
3114                 src_map = &tp->rx_std_buffers[src_idx];
3115                 break;
3116
3117         case RXD_OPAQUE_RING_JUMBO:
3118                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3119                 dest_desc = &tp->rx_jumbo[dest_idx];
3120                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3121                 src_desc = &tp->rx_jumbo[src_idx];
3122                 src_map = &tp->rx_jumbo_buffers[src_idx];
3123                 break;
3124
3125         default:
3126                 return;
3127         };
3128
3129         dest_map->skb = src_map->skb;
3130         pci_unmap_addr_set(dest_map, mapping,
3131                            pci_unmap_addr(src_map, mapping));
3132         dest_desc->addr_hi = src_desc->addr_hi;
3133         dest_desc->addr_lo = src_desc->addr_lo;
3134
3135         src_map->skb = NULL;
3136 }
3137
#if TG3_VLAN_TAG_USED
/* Hand a received frame to the stack via the VLAN hardware-accel path,
 * using the vlan group registered on this tg3 instance.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
        return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3144
3145 /* The RX ring scheme is composed of multiple rings which post fresh
3146  * buffers to the chip, and one special ring the chip uses to report
3147  * status back to the host.
3148  *
3149  * The special ring reports the status of received packets to the
3150  * host.  The chip does not write into the original descriptor the
3151  * RX buffer was obtained from.  The chip simply takes the original
3152  * descriptor as provided by the host, updates the status and length
3153  * field, then writes this into the next status ring entry.
3154  *
3155  * Each ring the host uses to post buffers to the chip is described
3156  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3157  * it is first placed into the on-chip ram.  When the packet's length
3158  * is known, it walks down the TG3_BDINFO entries to select the ring.
3159  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3160  * which is within the range of the new packet's length is chosen.
3161  *
3162  * The "separate ring for rx status" scheme may sound queer, but it makes
3163  * sense from a cache coherency perspective.  If only the host writes
3164  * to the buffer post rings, and only the chip writes to the rx status
3165  * rings, then cache lines never move beyond shared-modified state.
3166  * If both the host and chip were to write into the same ring, cache line
3167  * eviction could occur since both entities want it in an exclusive state.
3168  */
/* Service the RX return ring: process at most @budget completed
 * packets, recycle or replace their buffers, hand good frames to the
 * stack, and update the chip's consumer/producer mailboxes.
 * Returns the number of packets delivered to the stack.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
        u32 work_mask, rx_std_posted = 0;
        u32 sw_idx = tp->rx_rcb_ptr;
        u16 hw_idx;
        int received;

        hw_idx = tp->hw_status->idx[0].rx_producer;
        /*
         * We need to order the read of hw_idx and the read of
         * the opaque cookie.
         */
        rmb();
        work_mask = 0;
        received = 0;
        while (sw_idx != hw_idx && budget > 0) {
                struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;

                /* The opaque cookie identifies the posting ring and the
                 * slot within it that supplied this buffer.
                 */
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_std_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_std_ptr;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_jumbo_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_jumbo_ptr;
                }
                else {
                        goto next_pkt_nopost;
                }

                work_mask |= opaque_key;

                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
                        tp->net_stats.rx_dropped++;
                        goto next_pkt;
                }

                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

                /* Large frame: give the chip a fresh buffer and pass the
                 * original skb up without copying.  Otherwise copy into a
                 * small skb and recycle the original ring buffer.
                 */
                if (len > RX_COPY_THRESHOLD
                        && tp->rx_offset == 2
                        /* rx_offset != 2 iff this is a 5701 card running
                         * in PCI-X mode [see tg3_get_invariants()] */
                ) {
                        int skb_size;

                        skb_size = tg3_alloc_rx_skb(tp, opaque_key,
                                                    desc_idx, *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;

                        pci_unmap_single(tp->pdev, dma_addr,
                                         skb_size - tp->rx_offset,
                                         PCI_DMA_FROMDEVICE);

                        skb_put(skb, len);
                } else {
                        struct sk_buff *copy_skb;

                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);

                        copy_skb = netdev_alloc_skb(tp->dev, len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* Sync the buffer to the CPU for the copy, then
                         * back to the device since the chip keeps it.
                         */
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        memcpy(copy_skb->data, skb->data, len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
                }

                /* Accept the hardware checksum only when the chip flags
                 * a valid TCP/UDP csum of 0xffff.
                 */
                if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
                if (tp->vlgrp != NULL &&
                    desc->type_flags & RXD_FLAG_VLAN) {
                        tg3_vlan_rx(tp, skb,
                                    desc->err_vlan & RXD_VLAN_MASK);
                } else
#endif
                        netif_receive_skb(skb);

                tp->dev->last_rx = jiffies;
                received++;
                budget--;

next_pkt:
                (*post_ptr)++;

                /* Post std-ring buffers back in batches so the chip is
                 * never starved during a long poll.
                 */
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
                        u32 idx = *post_ptr % TG3_RX_RING_SIZE;

                        tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
                                     TG3_64BIT_REG_LOW, idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
next_pkt_nopost:
                sw_idx++;
                sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

                /* Refresh hw_idx to see if there is new work */
                if (sw_idx == hw_idx) {
                        hw_idx = tp->hw_status->idx[0].rx_producer;
                        rmb();
                }
        }

        /* ACK the status ring. */
        tp->rx_rcb_ptr = sw_idx;
        tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

        /* Refill RX ring(s). */
        if (work_mask & RXD_OPAQUE_RING_STD) {
                sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        if (work_mask & RXD_OPAQUE_RING_JUMBO) {
                sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        /* Order the mailbox writes before any later MMIO. */
        mmiowb();

        return received;
}
3324
/* NAPI poll: handle PHY/link events, reap TX completions, process RX
 * within *budget, then re-enable chip interrupts once all work is done.
 * Returns 0 when done (poll removed from list), 1 to be polled again.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
        struct tg3 *tp = netdev_priv(netdev);
        struct tg3_hw_status *sblk = tp->hw_status;
        int done;

        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Clear the link-change bit but keep UPDATED set. */
                        sblk->status = SD_STATUS_UPDATED |
                                (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }

        /* run TX completion thread */
        if (sblk->idx[0].tx_consumer != tp->tx_cons) {
                tg3_tx(tp);
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
                        /* TX path hit a fatal condition: stop polling and
                         * defer a chip reset to the workqueue task.
                         */
                        netif_rx_complete(netdev);
                        schedule_work(&tp->reset_task);
                        return 0;
                }
        }

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with dev->poll()
         */
        if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > netdev->quota)
                        orig_budget = netdev->quota;

                work_done = tg3_rx(tp, orig_budget);

                *budget -= work_done;
                netdev->quota -= work_done;
        }

        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
                /* Record the tag of the status block just serviced,
                 * ordered before the tg3_has_work() check below.
                 */
                tp->last_tag = sblk->status_tag;
                rmb();
        } else
                sblk->status &= ~SD_STATUS_UPDATED;

        /* if no more work, tell net stack and NIC we're done */
        done = !tg3_has_work(tp);
        if (done) {
                netif_rx_complete(netdev);
                tg3_restart_ints(tp);
        }

        return (done ? 0 : 1);
}
3386
/* Block further interrupt-driven work: mark the driver as synchronizing
 * and wait for any in-flight interrupt handler to complete.  Must not
 * be called while already quiesced (BUG_ON).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Make irq_sync visible to the IRQ handler before waiting. */
        smp_mb();

        synchronize_irq(tp->pdev->irq);
}
3396
/* Nonzero while tg3_irq_quiesce() has interrupt processing shut off;
 * IRQ handlers check this before scheduling NAPI.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}
3401
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        /* Quiesce interrupts first so the handler cannot race with the
         * critical section entered below.
         */
        if (irq_sync)
                tg3_irq_quiesce(tp);
        spin_lock_bh(&tp->lock);
}
3413
/* Release the lock taken by tg3_full_lock().  Note this does not undo
 * the irq quiesce; callers clear tp->irq_sync separately.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
3418
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        /* Warm the cache lines the NAPI poll will touch first. */
        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(dev);         /* schedule NAPI poll */

        return IRQ_HANDLED;
}
3435
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        /* Warm the cache lines the NAPI poll will touch first. */
        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(dev);         /* schedule NAPI poll */

        return IRQ_RETVAL(1);
}
3460
/* INTx interrupt handler for chips using non-tagged status blocks.
 * Returns IRQ_RETVAL(1) when the interrupt was ours, IRQ_RETVAL(0)
 * when it appears to belong to another device sharing the line.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if ((sblk->status & SD_STATUS_UPDATED) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                /*
                 * Writing any value to intr-mbox-0 clears PCI INTA# and
                 * chip-internal interrupt pending events.
                 * Writing non-zero to intr-mbox-0 additionally tells the
                 * NIC to stop sending us irqs, engaging "in-intr-handler"
                 * event coalescing.
                 */
                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                             0x00000001);
                if (tg3_irq_sync(tp))
                        goto out;
                sblk->status &= ~SD_STATUS_UPDATED;
                if (likely(tg3_has_work(tp))) {
                        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                        netif_rx_schedule(dev);         /* schedule NAPI poll */
                } else {
                        /* No work, shared interrupt perhaps?  re-enable
                         * interrupts, and flush that PCI write
                         */
                        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                                0x00000000);
                }
        } else {        /* shared interrupt */
                handled = 0;
        }
out:
        return IRQ_RETVAL(handled);
}
3503
/* INTx interrupt handler for chips using tagged status blocks: a new
 * status_tag (vs. tp->last_tag) indicates fresh work.  Returns
 * IRQ_RETVAL(1) when the interrupt was ours, IRQ_RETVAL(0) otherwise.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if ((sblk->status_tag != tp->last_tag) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                /*
                 * writing any value to intr-mbox-0 clears PCI INTA# and
                 * chip-internal interrupt pending events.
                 * writing non-zero to intr-mbox-0 additionally tells the
                 * NIC to stop sending us irqs, engaging "in-intr-handler"
                 * event coalescing.
                 */
                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                             0x00000001);
                if (tg3_irq_sync(tp))
                        goto out;
                if (netif_rx_schedule_prep(dev)) {
                        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                        /* Update last_tag to mark that this status has been
                         * seen. Because interrupt may be shared, we may be
                         * racing with tg3_poll(), so only update last_tag
                         * if tg3_poll() is not scheduled.
                         */
                        tp->last_tag = sblk->status_tag;
                        __netif_rx_schedule(dev);
                }
        } else {        /* shared interrupt */
                handled = 0;
        }
out:
        return IRQ_RETVAL(handled);
}
3545
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id,
                struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;

        /* Claim the interrupt only if the status block was updated or
         * the PCI state register shows our INTA# asserted; ack it via
         * the interrupt mailbox and report handled.
         */
        if ((sblk->status & SD_STATUS_UPDATED) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                             0x00000001);
                return IRQ_RETVAL(1);
        }
        return IRQ_RETVAL(0);
}
3562
3563 static int tg3_init_hw(struct tg3 *, int);
3564 static int tg3_halt(struct tg3 *, int, int);
3565
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 * Returns 0 on success; on failure the chip is halted, the device is
 * closed, and the tg3_init_hw() error code is returned (with tp->lock
 * re-acquired for the caller).
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
                       "aborting.\n", tp->dev->name);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                /* NOTE(review): the lock is dropped around the teardown
                 * below — presumably because dev_close() re-enters paths
                 * that take tp->lock; confirm against dev_close() callees.
                 */
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                tp->irq_sync = 0;
                netif_poll_enable(tp->dev);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
3587
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the INTx interrupt handler by hand (regs unused). */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3596
/* Workqueue handler (tp->reset_task): fully halt and reinitialize the
 * chip after a TX timeout or TX-path recovery event.
 */
static void tg3_reset_task(void *_data)
{
        struct tg3 *tp = _data;
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);
        tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

        /* Device went down between scheduling and running; nothing to do. */
        if (!netif_running(tp->dev)) {
                tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
                tg3_full_unlock(tp);
                return;
        }

        tg3_full_unlock(tp);

        /* Stop queue/poll activity without holding the lock, then
         * re-acquire with IRQ quiesce (irq_sync=1) for the reset proper.
         */
        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

        if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
                /* Switch the TX/RX mailbox write methods (see
                 * tg3_write32_tx_mbox / tg3_write_flush_reg32) while
                 * recovering from a TX-path failure.
                 */
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
                tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        if (tg3_init_hw(tp, 1))
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

        tg3_full_unlock(tp);
}
3641
/* net_device watchdog callback: the TX queue stalled too long; log the
 * event and defer a full chip reset to the reset workqueue task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        schedule_work(&tp->reset_task);
}
3651
3652 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3653 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3654 {
3655         u32 base = (u32) mapping & 0xffffffff;
3656
3657         return ((base > 0xffffdcc0) &&
3658                 (base + len + 8 < base));
3659 }
3660
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
                                          int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
        /* Only chips flagged with the 40-bit DMA bug need the check,
         * and addresses past 40 bits can only occur on 64-bit/highmem
         * configurations.
         */
        if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
                return (((u64) mapping + len) > DMA_40BIT_MASK);
        return 0;
#else
        return 0;
#endif
}
3673
3674 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3675
/* Workaround 4GB and 40-bit hardware DMA bugs.
 * Re-linearizes @skb into a freshly allocated copy mapped outside the
 * buggy address ranges, rewrites the descriptor at *start, and unmaps
 * the original fragment mappings from *start up to @last_plus_one.
 * Returns 0 on success, -1 if the copy could not be allocated or
 * safely mapped; the original skb is consumed either way.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                                       u32 last_plus_one, u32 *start,
                                       u32 base_flags, u32 mss)
{
        struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
        dma_addr_t new_addr = 0;
        u32 entry = *start;
        int i, ret = 0;

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                entry = *start;
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
                if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        ret = -1;
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
                } else {
                        tg3_set_txd(tp, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                        *start = NEXT_TX(entry);
                }
        }

        /* Now clean up the sw ring entries. */
        i = 0;
        while (entry != last_plus_one) {
                int len;

                /* Slot 0 held the linear head; the rest held frags. */
                if (i == 0)
                        len = skb_headlen(skb);
                else
                        len = skb_shinfo(skb)->frags[i-1].size;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
                                 len, PCI_DMA_TODEVICE);
                if (i == 0) {
                        /* First slot now owns the linearized copy (NULL
                         * when the copy was dropped above).
                         */
                        tp->tx_buffers[entry].skb = new_skb;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
                } else {
                        tp->tx_buffers[entry].skb = NULL;
                }
                entry = NEXT_TX(entry);
                i++;
        }

        /* The original (possibly fragmented) skb is no longer needed. */
        dev_kfree_skb(skb);

        return ret;
}
3733
3734 static void tg3_set_txd(struct tg3 *tp, int entry,
3735                         dma_addr_t mapping, int len, u32 flags,
3736                         u32 mss_and_is_end)
3737 {
3738         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3739         int is_end = (mss_and_is_end & 0x1);
3740         u32 mss = (mss_and_is_end >> 1);
3741         u32 vlan_tag = 0;
3742
3743         if (is_end)
3744                 flags |= TXD_FLAG_END;
3745         if (flags & TXD_FLAG_VLAN) {
3746                 vlan_tag = flags >> 16;
3747                 flags &= 0xffff;
3748         }
3749         vlan_tag |= (mss << TXD_MSS_SHIFT);
3750
3751         txd->addr_hi = ((u64) mapping >> 32);
3752         txd->addr_lo = ((u64) mapping & 0xffffffff);
3753         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3754         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3755 }
3756
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 * Maps the skb head and fragments, fills TX descriptors, posts the
 * producer index to the chip, and manages queue flow control.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
#if TG3_TSO_SUPPORT != 0
        mss = 0;
        /* TSO: patch the IP/TCP headers and fold the header length into
         * the mss field as the hardware expects.
         */
        if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
            (mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        tcp_opt_len = ((skb->h.th->doff - 5) * 4);
                        ip_tcp_len = (skb->nh.iph->ihl * 4) +
                                     sizeof(struct tcphdr);

                        /* Zero the checksum and set per-segment tot_len;
                         * the chip recomputes both per segment.
                         */
                        skb->nh.iph->check = 0;
                        skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
                                                     tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                skb->h.th->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#else
        mss = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#endif
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        /* Only the slot holding the head keeps the skb pointer; frag
         * slots below store NULL (see TX reclaim).
         */
        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                /* Ring nearly full: stop the queue, but re-wake it if
                 * reclaim has already freed enough descriptors.
                 */
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
3882
#if TG3_TSO_SUPPORT != 0
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		return NETDEV_TX_BUSY;
	}

	/* Have the stack segment the packet in software, then transmit
	 * each resulting segment on its own.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	while (segs) {
		struct sk_buff *seg = segs;

		segs = segs->next;
		seg->next = NULL;
		tg3_start_xmit_dma_bug(seg, tp->dev);
	}

tg3_tso_bug_end:
	/* The original (now segmented) skb is no longer needed. */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
#endif
3916
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;	/* set if any mapping trips a DMA erratum */

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* TSO path: we are about to modify the headers, so make
		 * sure they are private to us first.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		/* Headers longer than 80 bytes trip a HW TSO bug on some
		 * chips; fall back to software GSO segmentation there.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Prime the IP header for per-segment checksumming. */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* HW TSO computes the TCP checksum itself. */
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		}
		else {
			/* Firmware TSO expects the pseudo-header checksum
			 * pre-seeded into the TCP header.
			 */
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0, IPPROTO_TCP, 0);
		}

		/* Encode the IP/TCP option lengths where this chip
		 * variant expects them: in the mss field for HW TSO and
		 * 5705, in base_flags otherwise.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
			}
		}
	}
#else
	mss = 0;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* Buffers crossing a 4GB boundary trip a chip erratum. */
	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* Check both the 4GB-crossing and the 40-bit
			 * address errata for every fragment.
			 */
			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet so the
		 * workaround can re-queue its descriptors.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Stop the queue before the ring can overflow; re-wake
		 * immediately if reclaim freed enough in the meantime.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4097
4098 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4099                                int new_mtu)
4100 {
4101         dev->mtu = new_mtu;
4102
4103         if (new_mtu > ETH_DATA_LEN) {
4104                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4105                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4106                         ethtool_op_set_tso(dev, 0);
4107                 }
4108                 else
4109                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4110         } else {
4111                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4112                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4113                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4114         }
4115 }
4116
/* net_device MTU-change entry point.  Validates the new MTU, and if
 * the interface is up, performs a full chip reset/restart so the rx
 * ring geometry matches the new size.  Returns 0 or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Reject MTUs outside what this chip revision supports. */
	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Device is up: quiesce the net interface, then under the full
	 * lock halt the chip, apply the MTU, and restart the hardware.
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	/* Only restart the queues if the hardware came back up. */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4150
4151 /* Free up pending packets in all rx/tx rings.
4152  *
4153  * The chip has been shut down and the driver detached from
4154  * the networking, so no interrupts or new tx packets will
4155  * end up in the driver.  tp->{tx,}lock is not held and we are not
4156  * in an interrupt context and thus may sleep.
4157  */
4158 static void tg3_free_rings(struct tg3 *tp)
4159 {
4160         struct ring_info *rxp;
4161         int i;
4162
4163         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4164                 rxp = &tp->rx_std_buffers[i];
4165
4166                 if (rxp->skb == NULL)
4167                         continue;
4168                 pci_unmap_single(tp->pdev,
4169                                  pci_unmap_addr(rxp, mapping),
4170                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4171                                  PCI_DMA_FROMDEVICE);
4172                 dev_kfree_skb_any(rxp->skb);
4173                 rxp->skb = NULL;
4174         }
4175
4176         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4177                 rxp = &tp->rx_jumbo_buffers[i];
4178
4179                 if (rxp->skb == NULL)
4180                         continue;
4181                 pci_unmap_single(tp->pdev,
4182                                  pci_unmap_addr(rxp, mapping),
4183                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4184                                  PCI_DMA_FROMDEVICE);
4185                 dev_kfree_skb_any(rxp->skb);
4186                 rxp->skb = NULL;
4187         }
4188
4189         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4190                 struct tx_ring_info *txp;
4191                 struct sk_buff *skb;
4192                 int j;
4193
4194                 txp = &tp->tx_buffers[i];
4195                 skb = txp->skb;
4196
4197                 if (skb == NULL) {
4198                         i++;
4199                         continue;
4200                 }
4201
4202                 pci_unmap_single(tp->pdev,
4203                                  pci_unmap_addr(txp, mapping),
4204                                  skb_headlen(skb),
4205                                  PCI_DMA_TODEVICE);
4206                 txp->skb = NULL;
4207
4208                 i++;
4209
4210                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4211                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4212                         pci_unmap_page(tp->pdev,
4213                                        pci_unmap_addr(txp, mapping),
4214                                        skb_shinfo(skb)->frags[j].size,
4215                                        PCI_DMA_TODEVICE);
4216                         i++;
4217                 }
4218
4219                 dev_kfree_skb_any(skb);
4220         }
4221 }
4222
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0, or -ENOMEM if not a single rx buffer could be allocated
 * for a required ring.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips handle jumbo MTUs with larger standard-ring
	 * buffers rather than the separate jumbo ring.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* NOTE(review): the extra 64 bytes presumably reserves
		 * tailroom in each buffer — confirm against the skb
		 * sizing in tg3_alloc_rx_skb().
		 */
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			/* Partial allocation is tolerated: shrink the
			 * ring to what we got — unless we got nothing.
			 */
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					/* Also release the std-ring skbs
					 * allocated above.
					 */
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4312
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases everything set up by tg3_alloc_consistent(): the combined
 * shadow-buffer allocation and each DMA-coherent ring/block.  Each
 * pointer is checked and NULLed, so partially-allocated state and
 * repeated calls are both safe.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_jumbo_buffers and tx_buffers point into this single
	 * allocation (see tg3_alloc_consistent), so this kfree
	 * releases all three shadow arrays.  kfree(NULL) is a no-op.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4352
4353 /*
4354  * Must not be invoked with interrupt sources disabled and
4355  * the hardware shutdown down.  Can sleep.
4356  */
4357 static int tg3_alloc_consistent(struct tg3 *tp)
4358 {
4359         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4360                                       (TG3_RX_RING_SIZE +
4361                                        TG3_RX_JUMBO_RING_SIZE)) +
4362                                      (sizeof(struct tx_ring_info) *
4363                                       TG3_TX_RING_SIZE),
4364                                      GFP_KERNEL);
4365         if (!tp->rx_std_buffers)
4366                 return -ENOMEM;
4367
4368         memset(tp->rx_std_buffers, 0,
4369                (sizeof(struct ring_info) *
4370                 (TG3_RX_RING_SIZE +
4371                  TG3_RX_JUMBO_RING_SIZE)) +
4372                (sizeof(struct tx_ring_info) *
4373                 TG3_TX_RING_SIZE));
4374
4375         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4376         tp->tx_buffers = (struct tx_ring_info *)
4377                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4378
4379         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4380                                           &tp->rx_std_mapping);
4381         if (!tp->rx_std)
4382                 goto err_out;
4383
4384         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4385                                             &tp->rx_jumbo_mapping);
4386
4387         if (!tp->rx_jumbo)
4388                 goto err_out;
4389
4390         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4391                                           &tp->rx_rcb_mapping);
4392         if (!tp->rx_rcb)
4393                 goto err_out;
4394
4395         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4396                                            &tp->tx_desc_mapping);
4397         if (!tp->tx_ring)
4398                 goto err_out;
4399
4400         tp->hw_status = pci_alloc_consistent(tp->pdev,
4401                                              TG3_HW_STATUS_SIZE,
4402                                              &tp->status_mapping);
4403         if (!tp->hw_status)
4404                 goto err_out;
4405
4406         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4407                                             sizeof(struct tg3_hw_stats),
4408                                             &tp->stats_mapping);
4409         if (!tp->hw_stats)
4410                 goto err_out;
4411
4412         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4413         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4414
4415         return 0;
4416
4417 err_out:
4418         tg3_free_consistent(tp);
4419         return -ENOMEM;
4420 }
4421
4422 #define MAX_WAIT_CNT 1000
4423
4424 /* To stop a block, clear the enable bit and poll till it
4425  * clears.  tp->lock is held.
4426  */
4427 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4428 {
4429         unsigned int i;
4430         u32 val;
4431
4432         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4433                 switch (ofs) {
4434                 case RCVLSC_MODE:
4435                 case DMAC_MODE:
4436                 case MBFREE_MODE:
4437                 case BUFMGR_MODE:
4438                 case MEMARB_MODE:
4439                         /* We can't enable/disable these bits of the
4440                          * 5705/5750, just say success.
4441                          */
4442                         return 0;
4443
4444                 default:
4445                         break;
4446                 };
4447         }
4448
4449         val = tr32(ofs);
4450         val &= ~enable_bit;
4451         tw32_f(ofs, val);
4452
4453         for (i = 0; i < MAX_WAIT_CNT; i++) {
4454                 udelay(100);
4455                 val = tr32(ofs);
4456                 if ((val & enable_bit) == 0)
4457                         break;
4458         }
4459
4460         if (i == MAX_WAIT_CNT && !silent) {
4461                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4462                        "ofs=%lx enable_bit=%x\n",
4463                        ofs, enable_bit);
4464                 return -ENODEV;
4465         }
4466
4467         return 0;
4468 }
4469
/* tp->lock is held. */
/* Stop every packet-processing engine on the chip: receive path
 * first, then the send/DMA path, host coalescing, and finally the
 * buffer/memory managers.  ORs the individual results together so a
 * non-zero err means at least one engine failed to stop; with
 * @silent set, tg3_stop_block() suppresses both the log message and
 * the error return for engine timeouts.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the MAC from accepting new rx frames. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side engines. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Send-side and DMA engines. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and poll (up to MAX_WAIT_CNT
	 * iterations of 100us) for it to quiesce.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing, write DMA, and the free-buffer manager. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* With everything quiesced, clear the status and stats blocks. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4532
4533 /* tp->lock is held. */
4534 static int tg3_nvram_lock(struct tg3 *tp)
4535 {
4536         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4537                 int i;
4538
4539                 if (tp->nvram_lock_cnt == 0) {
4540                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4541                         for (i = 0; i < 8000; i++) {
4542                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4543                                         break;
4544                                 udelay(20);
4545                         }
4546                         if (i == 8000) {
4547                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4548                                 return -ENODEV;
4549                         }
4550                 }
4551                 tp->nvram_lock_cnt++;
4552         }
4553         return 0;
4554 }
4555
4556 /* tp->lock is held. */
4557 static void tg3_nvram_unlock(struct tg3 *tp)
4558 {
4559         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4560                 if (tp->nvram_lock_cnt > 0)
4561                         tp->nvram_lock_cnt--;
4562                 if (tp->nvram_lock_cnt == 0)
4563                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4564         }
4565 }
4566
4567 /* tp->lock is held. */
4568 static void tg3_enable_nvram_access(struct tg3 *tp)
4569 {
4570         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4571             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4572                 u32 nvaccess = tr32(NVRAM_ACCESS);
4573
4574                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4575         }
4576 }
4577
4578 /* tp->lock is held. */
4579 static void tg3_disable_nvram_access(struct tg3 *tp)
4580 {
4581         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4582             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4583                 u32 nvaccess = tr32(NVRAM_ACCESS);
4584
4585                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4586         }
4587 }
4588
4589 /* tp->lock is held. */
4590 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4591 {
4592         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4593                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4594
4595         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4596                 switch (kind) {
4597                 case RESET_KIND_INIT:
4598                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4599                                       DRV_STATE_START);
4600                         break;
4601
4602                 case RESET_KIND_SHUTDOWN:
4603                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4604                                       DRV_STATE_UNLOAD);
4605                         break;
4606
4607                 case RESET_KIND_SUSPEND:
4608                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4609                                       DRV_STATE_SUSPEND);
4610                         break;
4611
4612                 default:
4613                         break;
4614                 };
4615         }
4616 }
4617
4618 /* tp->lock is held. */
4619 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4620 {
4621         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4622                 switch (kind) {
4623                 case RESET_KIND_INIT:
4624                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4625                                       DRV_STATE_START_DONE);
4626                         break;
4627
4628                 case RESET_KIND_SHUTDOWN:
4629                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4630                                       DRV_STATE_UNLOAD_DONE);
4631                         break;
4632
4633                 default:
4634                         break;
4635                 };
4636         }
4637 }
4638
4639 /* tp->lock is held. */
4640 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4641 {
4642         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4643                 switch (kind) {
4644                 case RESET_KIND_INIT:
4645                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4646                                       DRV_STATE_START);
4647                         break;
4648
4649                 case RESET_KIND_SHUTDOWN:
4650                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4651                                       DRV_STATE_UNLOAD);
4652                         break;
4653
4654                 case RESET_KIND_SUSPEND:
4655                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4656                                       DRV_STATE_SUSPEND);
4657                         break;
4658
4659                 default:
4660                         break;
4661                 };
4662         }
4663 }
4664
4665 static int tg3_poll_fw(struct tg3 *tp)
4666 {
4667         int i;
4668         u32 val;
4669
4670         /* Wait for firmware initialization to complete. */
4671         for (i = 0; i < 100000; i++) {
4672                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4673                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4674                         break;
4675                 udelay(10);
4676         }
4677
4678         /* Chip might not be fitted with firmware.  Some Sun onboard
4679          * parts are configured like that.  So don't signal the timeout
4680          * of the above loop as an error, but do report the lack of
4681          * running firmware once.
4682          */
4683         if (i >= 100000 &&
4684             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4685                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4686
4687                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4688                        tp->dev->name);
4689         }
4690
4691         return 0;
4692 }
4693
4694 static void tg3_stop_fw(struct tg3 *);
4695
/* tp->lock is held.
 *
 * Perform a full core-clock reset of the chip and bring it back to a
 * state where register and config-space accesses work again.  Also
 * re-probes the ASF enable state from NIC SRAM, since the reset wipes
 * it.  Returns 0 or a negative errno from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* Clear the fastboot PC so the boot ROM runs from scratch
	 * on chips that support fastboot.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI Express workarounds (undocumented registers). */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	/* 5750 A3: stop firmware and poke an undocumented register. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	/* 5705 A0 workaround (undocumented register 0xc4). */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode to match the PHY attachment. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for the bootcode to finish (or detect its absence). */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	/* PCI Express workaround, all revs except 5750 A0
	 * (undocumented register 0x7c00).
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4889
4890 /* tp->lock is held. */
4891 static void tg3_stop_fw(struct tg3 *tp)
4892 {
4893         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4894                 u32 val;
4895                 int i;
4896
4897                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4898                 val = tr32(GRC_RX_CPU_EVENT);
4899                 val |= (1 << 14);
4900                 tw32(GRC_RX_CPU_EVENT, val);
4901
4902                 /* Wait for RX cpu to ACK the event.  */
4903                 for (i = 0; i < 100; i++) {
4904                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4905                                 break;
4906                         udelay(1);
4907                 }
4908         }
4909 }
4910
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	/* Quiesce firmware and hardware, reset the chip, then post
	 * both the legacy and new-handshake reset signatures.
	 */
	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
4931
4932 #define TG3_FW_RELEASE_MAJOR    0x0
4933 #define TG3_FW_RELASE_MINOR     0x0
4934 #define TG3_FW_RELEASE_FIX      0x0
4935 #define TG3_FW_START_ADDR       0x08000000
4936 #define TG3_FW_TEXT_ADDR        0x08000000
4937 #define TG3_FW_TEXT_LEN         0x9c0
4938 #define TG3_FW_RODATA_ADDR      0x080009c0
4939 #define TG3_FW_RODATA_LEN       0x60
4940 #define TG3_FW_DATA_ADDR        0x08000a40
4941 #define TG3_FW_DATA_LEN         0x20
4942 #define TG3_FW_SBSS_ADDR        0x08000a60
4943 #define TG3_FW_SBSS_LEN         0xc
4944 #define TG3_FW_BSS_ADDR         0x08000a70
4945 #define TG3_FW_BSS_LEN          0x10
4946
/* Text segment of the 5701 A0 bootcode fix firmware image (MIPS
 * machine code), loaded into the RX cpu scratch memory by
 * tg3_load_5701_a0_firmware_fix().  Data only -- do not edit.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5040
/* Read-only data segment of the 5701 A0 fix firmware image (ASCII
 * string constants used by the firmware).  Data only -- do not edit.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5048
5049 #if 0 /* All zeros, don't eat up space with it. */
5050 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5051         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5052         0x00000000, 0x00000000, 0x00000000, 0x00000000
5053 };
5054 #endif
5055
5056 #define RX_CPU_SCRATCH_BASE     0x30000
5057 #define RX_CPU_SCRATCH_SIZE     0x04000
5058 #define TX_CPU_SCRATCH_BASE     0x34000
5059 #define TX_CPU_SCRATCH_SIZE     0x04000
5060
/* tp->lock is held.
 *
 * Halt the on-chip RX or TX cpu selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  Returns 0 on success, -ENODEV if the cpu refuses to
 * halt within 10000 attempts.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705-and-later chips have no separate TX cpu. */
	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* Issue one final flushed halt for the RX cpu,
		 * regardless of how the loop above exited.
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	/* i reflects whichever loop above ran; >= 10000 means no
	 * halt acknowledgment was ever observed.
	 */
	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
5102
/* Describes one firmware image to be loaded into cpu scratch memory:
 * three segments (text, rodata, data), each with a target address,
 * a byte length, and a pointer to the words (NULL means zero-fill).
 */
struct fw_info {
	unsigned int text_base;		/* load address of text segment */
	unsigned int text_len;		/* text length in bytes */
	const u32 *text_data;		/* text words, or NULL for zeros */
	unsigned int rodata_base;	/* load address of rodata segment */
	unsigned int rodata_len;	/* rodata length in bytes */
	const u32 *rodata_data;		/* rodata words, or NULL for zeros */
	unsigned int data_base;		/* load address of data segment */
	unsigned int data_len;		/* data length in bytes */
	const u32 *data_data;		/* data words, or NULL for zeros */
};
5114
/* tp->lock is held.
 *
 * Halt the cpu at @cpu_base, zero its scratch memory, then copy the
 * firmware segments described by @info into the scratch area.  The
 * low 16 bits of each segment's base select the offset within the
 * scratch region.  Returns 0 on success or a negative errno from
 * tg3_halt_cpu().
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	/* 5705+ chips have no TX cpu; loading TX firmware there is a
	 * driver bug.
	 */
	if (cpu_base == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
		       "TX cpu firmware on %s which is 5705.\n",
		       tp->dev->name);
		return -EINVAL;
	}

	/* 5705+ uses memory-window writes; older chips use indirect
	 * register writes to reach scratch memory.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	/* Only unlock if our lock attempt actually succeeded. */
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Clear all of scratch memory, then keep the cpu halted while
	 * the image is written in.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->text_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->text_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->text_data ?
			  info->text_data[i] : 0));
	for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->rodata_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->rodata_data ?
			  info->rodata_data[i] : 0));
	for (i = 0; i < (info->data_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->data_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->data_data ?
			  info->data_data[i] : 0));

	err = 0;

out:
	return err;
}
5173
/* tp->lock is held.
 *
 * Load the 5701 A0 fix firmware into both RX and TX cpu scratch
 * memory, then start only the RX cpu at the firmware entry point.
 * Returns 0 on success, a negative errno on load failure, or
 * -ENODEV if the RX cpu PC cannot be set.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	info.data_len = TG3_FW_DATA_LEN;
	/* NULL: data segment is all zeros (see #if 0'd tg3FwData). */
	info.data_data = NULL;

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

	/* Retry up to 5 times: re-halt and re-set the PC until the
	 * cpu reads back the expected entry-point address.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}
	/* Release the RX cpu from halt so the firmware runs. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
5226
5227 #if TG3_TSO_SUPPORT != 0
5228
5229 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5230 #define TG3_TSO_FW_RELASE_MINOR         0x6
5231 #define TG3_TSO_FW_RELEASE_FIX          0x0
5232 #define TG3_TSO_FW_START_ADDR           0x08000000
5233 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5234 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5235 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5236 #define TG3_TSO_FW_RODATA_LEN           0x60
5237 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5238 #define TG3_TSO_FW_DATA_LEN             0x30
5239 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5240 #define TG3_TSO_FW_SBSS_LEN             0x2c
5241 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5242 #define TG3_TSO_FW_BSS_LEN              0x894
5243
5244 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5245         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5246         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5247         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5248         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5249         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5250         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5251         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5252         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5253         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5254         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5255         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5256         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5257         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5258         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5259         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5260         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5261         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5262         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5263         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5264         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5265         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5266         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5267         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5268         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5269         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5270         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5271         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5272         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5273         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5274         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5275         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5276         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5277         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5278         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5279         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5280         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5281         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5282         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5283         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5284         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5285         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5286         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5287         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5288         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5289         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5290         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5291         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5292         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5293         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5294         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5295         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5296         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5297         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5298         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5299         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5300         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5301         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5302         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5303         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5304         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5305         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5306         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5307         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5308         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5309         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5310         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5311         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5312         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5313         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5314         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5315         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5316         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5317         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5318         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5319         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5320         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5321         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5322         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5323         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5324         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5325         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5326         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5327         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5328         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5329         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5330         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5331         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5332         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5333         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5334         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5335         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5336         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5337         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5338         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5339         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5340         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5341         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5342         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5343         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5344         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5345         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5346         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5347         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5348         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5349         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5350         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5351         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5352         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5353         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5354         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5355         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5356         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5357         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5358         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5359         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5360         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5361         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5362         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5363         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5364         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5365         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5366         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5367         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5368         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5369         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5370         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5371         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5372         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5373         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5374         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5375         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5376         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5377         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5378         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5379         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5380         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5381         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5382         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5383         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5384         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5385         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5386         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5387         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5388         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5389         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5390         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5391         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5392         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5393         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5394         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5395         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5396         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5397         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5398         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5399         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5400         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5401         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5402         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5403         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5404         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5405         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5406         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5407         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5408         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5409         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5410         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5411         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5412         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5413         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5414         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5415         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5416         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5417         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5418         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5419         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5420         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5421         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5422         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5423         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5424         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5425         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5426         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5427         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5428         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5429         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5430         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5431         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5432         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5433         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5434         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5435         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5436         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5437         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5438         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5439         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5440         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5441         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5442         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5443         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5444         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5445         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5446         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5447         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5448         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5449         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5450         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5451         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5452         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5453         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5454         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5455         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5456         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5457         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5458         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5459         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5460         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5461         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5462         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5463         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5464         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5465         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5466         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5467         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5468         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5469         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5470         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5471         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5472         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5473         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5474         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5475         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5476         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5477         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5478         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5479         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5480         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5481         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5482         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5483         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5484         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5485         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5486         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5487         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5488         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5489         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5490         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5491         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5492         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5493         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5494         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5495         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5496         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5497         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5498         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5499         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5500         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5501         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5502         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5503         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5504         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5505         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5506         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5507         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5508         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5509         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5510         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5511         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5512         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5513         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5514         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5515         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5516         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5517         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5518         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5519         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5520         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5521         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5522         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5523         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5524         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5525         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5526         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5527         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5528         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5529 };
5530
/* Read-only data segment of the embedded TSO firmware image, dumped
 * verbatim as 32-bit words.  The words are ASCII text used by the
 * firmware: 0x4d61696e/0x43707542 = "MainCpuB", 0x43707541 = "CpuA",
 * 0x73746b6f/0x66666c64/0x496e0000 = "stkoffldIn",
 * 0x66662a2a = "ff**", 0x53774576/0x656e7430 = "SwEvent0",
 * 0x66617461/0x6c457272 = "fatalErr".
 * Do NOT edit these values — the image must stay byte-identical to the
 * firmware build it was dumped from. */
static const u32 tg3TsoFwRodata[] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
        0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
        0x00000000,
};
5538
/* Initialized data segment of the embedded TSO firmware image.  The
 * non-zero words decode to the ASCII version string "stkoffld_v1.6.0"
 * (0x73746b6f "stko", 0x66666c64 "ffld", 0x5f76312e "_v1.",
 * 0x362e3000 "6.0\0"); the remainder is zero fill.
 * Do NOT edit — must stay byte-identical to the firmware build. */
static const u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
};
5544
/* 5705 needs a special version of the TSO firmware.
 *
 * The macros below describe that firmware image's version and its
 * segment layout (address/length pairs for the text, rodata, data,
 * sbss and bss segments) as loaded into the on-chip CPU's address
 * space starting at TG3_TSO5_FW_START_ADDR.  Note the segments are
 * not all contiguous: rodata ends at 0x10ee0 but data begins at
 * 0x10f00, and bss begins at 0x10f50 while sbss ends at 0x10f48.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELEASE_MINOR       0x2
/* Historical misspelling ("RELASE") kept as an alias so any existing
 * references to the old name continue to compile unchanged. */
#define TG3_TSO5_FW_RELASE_MINOR        TG3_TSO5_FW_RELEASE_MINOR
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
5560
5561 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5562         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5563         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5564         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5565         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5566         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5567         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5568         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5569         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5570         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5571         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5572         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5573         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5574         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5575         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5576         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5577         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5578         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5579         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5580         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5581         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5582         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5583         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5584         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5585         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5586         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5587         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5588         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5589         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5590         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5591         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5592         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5593         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5594         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5595         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5596         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5597         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5598         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5599         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5600         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5601         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5602         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5603         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5604         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5605         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5606         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5607         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5608         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5609         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5610         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5611         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5612         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5613         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5614         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5615         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5616         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5617         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5618         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5619         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5620         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5621         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5622         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5623         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5624         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5625         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5626         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5627         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5628         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5629         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5630         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5631         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5632         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5633         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5634         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5635         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5636         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5637         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5638         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5639         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5640         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5641         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5642         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5643         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5644         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5645         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5646         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5647         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5648         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5649         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5650         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5651         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5652         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5653         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5654         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5655         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5656         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5657         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5658         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5659         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5660         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5661         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5662         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5663         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5664         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5665         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5666         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5667         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5668         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5669         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5670         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5671         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5672         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5673         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5674         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5675         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5676         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5677         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5678         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5679         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5680         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5681         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5682         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5683         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5684         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5685         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5686         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5687         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5688         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5689         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5690         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5691         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5692         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5693         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5694         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5695         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5696         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5697         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5698         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5699         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5700         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5701         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5702         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5703         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5704         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5705         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5706         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5707         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5708         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5709         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5710         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5711         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5712         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5713         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5714         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5715         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5716         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5717         0x00000000, 0x00000000, 0x00000000,
5718 };
5719
/* Read-only data segment of the 5705 TSO firmware image.  The words
 * decode to ASCII tags used by the firmware (e.g. 0x4d61696e = "Main",
 * 0x43707542 = "CpuB", "stkoffld", "fatalErr").
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5726
/* Initialized data segment of the 5705 TSO firmware image; the words
 * decode to the ASCII version tag "stkoffld_v1.2.0".
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5731
/* tp->lock is held. */
/* Download the TSO firmware image into the appropriate on-chip CPU and
 * start it running.  On 5705 parts the image runs on the RX CPU with
 * scratch space carved out of the mbuf pool SRAM; on everything else it
 * runs on the TX CPU using the dedicated TX scratch area.
 *
 * Returns 0 on success (or when the chip does TSO in hardware and no
 * download is needed), a negative errno otherwise.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware-TSO chips need no firmware download. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705 variant: RX CPU, scratch sized to cover the whole
		 * text+rodata+data+sbss+bss footprint of the image.
		 */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* All other chips: TX CPU with its fixed scratch region. */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Verify the program counter latched the firmware entry point;
	 * retry up to 5 times, halting the CPU and re-writing the PC on
	 * each attempt.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear CPU_MODE (drops CPU_MODE_HALT) so the firmware runs. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
5803
5804 #endif /* TG3_TSO_SUPPORT != 0 */
5805
5806 /* tp->lock is held. */
5807 static void __tg3_set_mac_addr(struct tg3 *tp)
5808 {
5809         u32 addr_high, addr_low;
5810         int i;
5811
5812         addr_high = ((tp->dev->dev_addr[0] << 8) |
5813                      tp->dev->dev_addr[1]);
5814         addr_low = ((tp->dev->dev_addr[2] << 24) |
5815                     (tp->dev->dev_addr[3] << 16) |
5816                     (tp->dev->dev_addr[4] <<  8) |
5817                     (tp->dev->dev_addr[5] <<  0));
5818         for (i = 0; i < 4; i++) {
5819                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5820                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5821         }
5822
5823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5825                 for (i = 0; i < 12; i++) {
5826                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5827                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5828                 }
5829         }
5830
5831         addr_high = (tp->dev->dev_addr[0] +
5832                      tp->dev->dev_addr[1] +
5833                      tp->dev->dev_addr[2] +
5834                      tp->dev->dev_addr[3] +
5835                      tp->dev->dev_addr[4] +
5836                      tp->dev->dev_addr[5]) &
5837                 TX_BACKOFF_SEED_MASK;
5838         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5839 }
5840
5841 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5842 {
5843         struct tg3 *tp = netdev_priv(dev);
5844         struct sockaddr *addr = p;
5845         int err = 0;
5846
5847         if (!is_valid_ether_addr(addr->sa_data))
5848                 return -EINVAL;
5849
5850         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5851
5852         if (!netif_running(dev))
5853                 return 0;
5854
5855         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5856                 /* Reset chip so that ASF can re-init any MAC addresses it
5857                  * needs.
5858                  */
5859                 tg3_netif_stop(tp);
5860                 tg3_full_lock(tp, 1);
5861
5862                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5863                 err = tg3_restart_hw(tp, 0);
5864                 if (!err)
5865                         tg3_netif_start(tp);
5866                 tg3_full_unlock(tp);
5867         } else {
5868                 spin_lock_bh(&tp->lock);
5869                 __tg3_set_mac_addr(tp);
5870                 spin_unlock_bh(&tp->lock);
5871         }
5872
5873         return err;
5874 }
5875
5876 /* tp->lock is held. */
5877 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5878                            dma_addr_t mapping, u32 maxlen_flags,
5879                            u32 nic_addr)
5880 {
5881         tg3_write_mem(tp,
5882                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5883                       ((u64) mapping >> 32));
5884         tg3_write_mem(tp,
5885                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5886                       ((u64) mapping & 0xffffffff));
5887         tg3_write_mem(tp,
5888                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5889                        maxlen_flags);
5890
5891         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5892                 tg3_write_mem(tp,
5893                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5894                               nic_addr);
5895 }
5896
5897 static void __tg3_set_rx_mode(struct net_device *);
5898 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5899 {
5900         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5901         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5902         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5903         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5904         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5905                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5906                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5907         }
5908         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5909         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5910         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5911                 u32 val = ec->stats_block_coalesce_usecs;
5912
5913                 if (!netif_carrier_ok(tp->dev))
5914                         val = 0;
5915
5916                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5917         }
5918 }
5919
5920 /* tp->lock is held. */
5921 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5922 {
5923         u32 val, rdmac_mode;
5924         int i, err, limit;
5925
5926         tg3_disable_ints(tp);
5927
5928         tg3_stop_fw(tp);
5929
5930         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5931
5932         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5933                 tg3_abort_hw(tp, 1);
5934         }
5935
5936         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5937                 tg3_phy_reset(tp);
5938
5939         err = tg3_chip_reset(tp);
5940         if (err)
5941                 return err;
5942
5943         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5944
5945         /* This works around an issue with Athlon chipsets on
5946          * B3 tigon3 silicon.  This bit has no effect on any
5947          * other revision.  But do not set this on PCI Express
5948          * chips.
5949          */
5950         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5951                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5952         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5953
5954         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5955             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5956                 val = tr32(TG3PCI_PCISTATE);
5957                 val |= PCISTATE_RETRY_SAME_DMA;
5958                 tw32(TG3PCI_PCISTATE, val);
5959         }
5960
5961         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5962                 /* Enable some hw fixes.  */
5963                 val = tr32(TG3PCI_MSI_DATA);
5964                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5965                 tw32(TG3PCI_MSI_DATA, val);
5966         }
5967
5968         /* Descriptor ring init may make accesses to the
5969          * NIC SRAM area to setup the TX descriptors, so we
5970          * can only do this after the hardware has been
5971          * successfully reset.
5972          */
5973         err = tg3_init_rings(tp);
5974         if (err)
5975                 return err;
5976
5977         /* This value is determined during the probe time DMA
5978          * engine test, tg3_test_dma.
5979          */
5980         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5981
5982         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5983                           GRC_MODE_4X_NIC_SEND_RINGS |
5984                           GRC_MODE_NO_TX_PHDR_CSUM |
5985                           GRC_MODE_NO_RX_PHDR_CSUM);
5986         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5987
5988         /* Pseudo-header checksum is done by hardware logic and not
5989          * the offload processers, so make the chip do the pseudo-
5990          * header checksums on receive.  For transmit it is more
5991          * convenient to do the pseudo-header checksum in software
5992          * as Linux does that on transmit for us in all cases.
5993          */
5994         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5995
5996         tw32(GRC_MODE,
5997              tp->grc_mode |
5998              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5999
6000         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6001         val = tr32(GRC_MISC_CFG);
6002         val &= ~0xff;
6003         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6004         tw32(GRC_MISC_CFG, val);
6005
6006         /* Initialize MBUF/DESC pool. */
6007         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6008                 /* Do nothing.  */
6009         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6010                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6011                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6012                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6013                 else
6014                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6015                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6016                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6017         }
6018 #if TG3_TSO_SUPPORT != 0
6019         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6020                 int fw_len;
6021
6022                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6023                           TG3_TSO5_FW_RODATA_LEN +
6024                           TG3_TSO5_FW_DATA_LEN +
6025                           TG3_TSO5_FW_SBSS_LEN +
6026                           TG3_TSO5_FW_BSS_LEN);
6027                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6028                 tw32(BUFMGR_MB_POOL_ADDR,
6029                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6030                 tw32(BUFMGR_MB_POOL_SIZE,
6031                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6032         }
6033 #endif
6034
6035         if (tp->dev->mtu <= ETH_DATA_LEN) {
6036                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6037                      tp->bufmgr_config.mbuf_read_dma_low_water);
6038                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6039                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6040                 tw32(BUFMGR_MB_HIGH_WATER,
6041                      tp->bufmgr_config.mbuf_high_water);
6042         } else {
6043                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6044                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6045                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6046                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6047                 tw32(BUFMGR_MB_HIGH_WATER,
6048                      tp->bufmgr_config.mbuf_high_water_jumbo);
6049         }
6050         tw32(BUFMGR_DMA_LOW_WATER,
6051              tp->bufmgr_config.dma_low_water);
6052         tw32(BUFMGR_DMA_HIGH_WATER,
6053              tp->bufmgr_config.dma_high_water);
6054
6055         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6056         for (i = 0; i < 2000; i++) {
6057                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6058                         break;
6059                 udelay(10);
6060         }
6061         if (i >= 2000) {
6062                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6063                        tp->dev->name);
6064                 return -ENODEV;
6065         }
6066
6067         /* Setup replenish threshold. */
6068         val = tp->rx_pending / 8;
6069         if (val == 0)
6070                 val = 1;
6071         else if (val > tp->rx_std_max_post)
6072                 val = tp->rx_std_max_post;
6073
6074         tw32(RCVBDI_STD_THRESH, val);
6075
6076         /* Initialize TG3_BDINFO's at:
6077          *  RCVDBDI_STD_BD:     standard eth size rx ring
6078          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6079          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6080          *
6081          * like so:
6082          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6083          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6084          *                              ring attribute flags
6085          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6086          *
6087          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6088          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6089          *
6090          * The size of each ring is fixed in the firmware, but the location is
6091          * configurable.
6092          */
6093         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6094              ((u64) tp->rx_std_mapping >> 32));
6095         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6096              ((u64) tp->rx_std_mapping & 0xffffffff));
6097         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6098              NIC_SRAM_RX_BUFFER_DESC);
6099
6100         /* Don't even try to program the JUMBO/MINI buffer descriptor
6101          * configs on 5705.
6102          */
6103         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6104                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6105                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6106         } else {
6107                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6108                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6109
6110                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6111                      BDINFO_FLAGS_DISABLED);
6112
6113                 /* Setup replenish threshold. */
6114                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6115
6116                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6117                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6118                              ((u64) tp->rx_jumbo_mapping >> 32));
6119                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6120                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6121                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6122                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6123                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6124                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6125                 } else {
6126                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6127                              BDINFO_FLAGS_DISABLED);
6128                 }
6129
6130         }
6131
6132         /* There is only one send ring on 5705/5750, no need to explicitly
6133          * disable the others.
6134          */
6135         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6136                 /* Clear out send RCB ring in SRAM. */
6137                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6138                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6139                                       BDINFO_FLAGS_DISABLED);
6140         }
6141
6142         tp->tx_prod = 0;
6143         tp->tx_cons = 0;
6144         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6145         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6146
6147         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6148                        tp->tx_desc_mapping,
6149                        (TG3_TX_RING_SIZE <<
6150                         BDINFO_FLAGS_MAXLEN_SHIFT),
6151                        NIC_SRAM_TX_BUFFER_DESC);
6152
6153         /* There is only one receive return ring on 5705/5750, no need
6154          * to explicitly disable the others.
6155          */
6156         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6157                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6158                      i += TG3_BDINFO_SIZE) {
6159                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6160                                       BDINFO_FLAGS_DISABLED);
6161                 }
6162         }
6163
6164         tp->rx_rcb_ptr = 0;
6165         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6166
6167         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6168                        tp->rx_rcb_mapping,
6169                        (TG3_RX_RCB_RING_SIZE(tp) <<
6170                         BDINFO_FLAGS_MAXLEN_SHIFT),
6171                        0);
6172
6173         tp->rx_std_ptr = tp->rx_pending;
6174         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6175                      tp->rx_std_ptr);
6176
6177         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6178                                                 tp->rx_jumbo_pending : 0;
6179         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6180                      tp->rx_jumbo_ptr);
6181
6182         /* Initialize MAC address and backoff seed. */
6183         __tg3_set_mac_addr(tp);
6184
6185         /* MTU + ethernet header + FCS + optional VLAN tag */
6186         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6187
6188         /* The slot time is changed by tg3_setup_phy if we
6189          * run at gigabit with half duplex.
6190          */
6191         tw32(MAC_TX_LENGTHS,
6192              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6193              (6 << TX_LENGTHS_IPG_SHIFT) |
6194              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6195
6196         /* Receive rules. */
6197         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6198         tw32(RCVLPC_CONFIG, 0x0181);
6199
6200         /* Calculate RDMAC_MODE setting early, we need it to determine
6201          * the RCVLPC_STATE_ENABLE mask.
6202          */
6203         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6204                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6205                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6206                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6207                       RDMAC_MODE_LNGREAD_ENAB);
6208         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6209                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6210
6211         /* If statement applies to 5705 and 5750 PCI devices only */
6212         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6213              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6214             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6215                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6216                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6217                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6218                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6219                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6220                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6221                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6222                 }
6223         }
6224
6225         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6226                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6227
6228 #if TG3_TSO_SUPPORT != 0
6229         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6230                 rdmac_mode |= (1 << 27);
6231 #endif
6232
6233         /* Receive/send statistics. */
6234         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6235                 val = tr32(RCVLPC_STATS_ENABLE);
6236                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6237                 tw32(RCVLPC_STATS_ENABLE, val);
6238         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6239                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6240                 val = tr32(RCVLPC_STATS_ENABLE);
6241                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6242                 tw32(RCVLPC_STATS_ENABLE, val);
6243         } else {
6244                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6245         }
6246         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6247         tw32(SNDDATAI_STATSENAB, 0xffffff);
6248         tw32(SNDDATAI_STATSCTRL,
6249              (SNDDATAI_SCTRL_ENABLE |
6250               SNDDATAI_SCTRL_FASTUPD));
6251
6252         /* Setup host coalescing engine. */
6253         tw32(HOSTCC_MODE, 0);
6254         for (i = 0; i < 2000; i++) {
6255                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6256                         break;
6257                 udelay(10);
6258         }
6259
6260         __tg3_set_coalesce(tp, &tp->coal);
6261
6262         /* set status block DMA address */
6263         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6264              ((u64) tp->status_mapping >> 32));
6265         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6266              ((u64) tp->status_mapping & 0xffffffff));
6267
6268         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6269                 /* Status/statistics block address.  See tg3_timer,
6270                  * the tg3_periodic_fetch_stats call there, and
6271                  * tg3_get_stats to see how this works for 5705/5750 chips.
6272                  */
6273                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6274                      ((u64) tp->stats_mapping >> 32));
6275                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6276                      ((u64) tp->stats_mapping & 0xffffffff));
6277                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6278                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6279         }
6280
6281         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6282
6283         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6284         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6285         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6286                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6287
6288         /* Clear statistics/status block in chip, and status block in ram. */
6289         for (i = NIC_SRAM_STATS_BLK;
6290              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6291              i += sizeof(u32)) {
6292                 tg3_write_mem(tp, i, 0);
6293                 udelay(40);
6294         }
6295         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6296
6297         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6298                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6299                 /* reset to prevent losing 1st rx packet intermittently */
6300                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6301                 udelay(10);
6302         }
6303
6304         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6305                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6306         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6307         udelay(40);
6308
6309         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6310          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6311          * register to preserve the GPIO settings for LOMs. The GPIOs,
6312          * whether used as inputs or outputs, are set by boot code after
6313          * reset.
6314          */
6315         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6316                 u32 gpio_mask;
6317
6318                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6319                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6320
6321                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6322                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6323                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6324
6325                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6326                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6327
6328                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6329
6330                 /* GPIO1 must be driven high for eeprom write protect */
6331                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6332                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6333         }
6334         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6335         udelay(100);
6336
6337         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6338         tp->last_tag = 0;
6339
6340         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6341                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6342                 udelay(40);
6343         }
6344
6345         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6346                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6347                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6348                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6349                WDMAC_MODE_LNGREAD_ENAB);
6350
6351         /* If statement applies to 5705 and 5750 PCI devices only */
6352         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6353              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6354             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6355                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6356                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6357                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6358                         /* nothing */
6359                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6360                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6361                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6362                         val |= WDMAC_MODE_RX_ACCEL;
6363                 }
6364         }
6365
6366         /* Enable host coalescing bug fix */
6367         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6368             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6369                 val |= (1 << 29);
6370
6371         tw32_f(WDMAC_MODE, val);
6372         udelay(40);
6373
6374         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6375                 val = tr32(TG3PCI_X_CAPS);
6376                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6377                         val &= ~PCIX_CAPS_BURST_MASK;
6378                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6379                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6380                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6381                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6382                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6383                                 val |= (tp->split_mode_max_reqs <<
6384                                         PCIX_CAPS_SPLIT_SHIFT);
6385                 }
6386                 tw32(TG3PCI_X_CAPS, val);
6387         }
6388
6389         tw32_f(RDMAC_MODE, rdmac_mode);
6390         udelay(40);
6391
6392         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6393         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6394                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6395         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6396         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6397         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6398         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6399         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6400 #if TG3_TSO_SUPPORT != 0
6401         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6402                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6403 #endif
6404         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6405         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6406
6407         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6408                 err = tg3_load_5701_a0_firmware_fix(tp);
6409                 if (err)
6410                         return err;
6411         }
6412
6413 #if TG3_TSO_SUPPORT != 0
6414         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6415                 err = tg3_load_tso_firmware(tp);
6416                 if (err)
6417                         return err;
6418         }
6419 #endif
6420
6421         tp->tx_mode = TX_MODE_ENABLE;
6422         tw32_f(MAC_TX_MODE, tp->tx_mode);
6423         udelay(100);
6424
6425         tp->rx_mode = RX_MODE_ENABLE;
6426         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6427                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6428
6429         tw32_f(MAC_RX_MODE, tp->rx_mode);
6430         udelay(10);
6431
6432         if (tp->link_config.phy_is_low_power) {
6433                 tp->link_config.phy_is_low_power = 0;
6434                 tp->link_config.speed = tp->link_config.orig_speed;
6435                 tp->link_config.duplex = tp->link_config.orig_duplex;
6436                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6437         }
6438
6439         tp->mi_mode = MAC_MI_MODE_BASE;
6440         tw32_f(MAC_MI_MODE, tp->mi_mode);
6441         udelay(80);
6442
6443         tw32(MAC_LED_CTRL, tp->led_ctrl);
6444
6445         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6446         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6447                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6448                 udelay(10);
6449         }
6450         tw32_f(MAC_RX_MODE, tp->rx_mode);
6451         udelay(10);
6452
6453         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6454                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6455                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6456                         /* Set drive transmission level to 1.2V  */
6457                         /* only if the signal pre-emphasis bit is not set  */
6458                         val = tr32(MAC_SERDES_CFG);
6459                         val &= 0xfffff000;
6460                         val |= 0x880;
6461                         tw32(MAC_SERDES_CFG, val);
6462                 }
6463                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6464                         tw32(MAC_SERDES_CFG, 0x616000);
6465         }
6466
6467         /* Prevent chip from dropping frames when flow control
6468          * is enabled.
6469          */
6470         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6471
6472         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6473             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6474                 /* Use hardware link auto-negotiation */
6475                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6476         }
6477
6478         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6479             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6480                 u32 tmp;
6481
6482                 tmp = tr32(SERDES_RX_CTRL);
6483                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6484                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6485                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6486                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6487         }
6488
6489         err = tg3_setup_phy(tp, reset_phy);
6490         if (err)
6491                 return err;
6492
6493         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6494                 u32 tmp;
6495
6496                 /* Clear CRC stats. */
6497                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6498                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6499                         tg3_readphy(tp, 0x14, &tmp);
6500                 }
6501         }
6502
6503         __tg3_set_rx_mode(tp->dev);
6504
6505         /* Initialize receive rules. */
6506         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6507         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6508         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6509         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6510
6511         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6512             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6513                 limit = 8;
6514         else
6515                 limit = 16;
6516         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6517                 limit -= 4;
6518         switch (limit) {
6519         case 16:
6520                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6521         case 15:
6522                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6523         case 14:
6524                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6525         case 13:
6526                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6527         case 12:
6528                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6529         case 11:
6530                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6531         case 10:
6532                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6533         case 9:
6534                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6535         case 8:
6536                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6537         case 7:
6538                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6539         case 6:
6540                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6541         case 5:
6542                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6543         case 4:
6544                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6545         case 3:
6546                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6547         case 2:
6548         case 1:
6549
6550         default:
6551                 break;
6552         };
6553
6554         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6555
6556         return 0;
6557 }
6558
6559 /* Called at device open time to get the chip ready for
6560  * packet processing.  Invoked with tp->lock held.
6561  */
6562 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6563 {
6564         int err;
6565
6566         /* Force the chip into D0. */
6567         err = tg3_set_power_state(tp, PCI_D0);
6568         if (err)
6569                 goto out;
6570
6571         tg3_switch_clocks(tp);
6572
6573         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6574
6575         err = tg3_reset_hw(tp, reset_phy);
6576
6577 out:
6578         return err;
6579 }
6580
/* Add the current value of the 32-bit hardware counter at REG into
 * the 64-bit accumulator PSTAT, which is split into ->low and ->high
 * words.  If the addition wraps ->low, carry one into ->high.  REG
 * is read exactly once per invocation.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
6587
/* Fold the chip's 32-bit MAC/RCVLPC statistics counters into the
 * 64-bit software copies in tp->hw_stats.  Invoked from tg3_timer()
 * under tp->lock, in the once-per-second section, on 5705-plus
 * chips.  Skipped when there is no carrier.
 * NOTE(review): the add-with-carry scheme suggests the hardware
 * counters are clear-on-read or wrap-prone 32-bit registers --
 * confirm against the chip documentation.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        /* No link, nothing to accumulate. */
        if (!netif_carrier_ok(tp->dev))
                return;

        /* Transmit-side MAC counters. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* Receive-side MAC counters. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        /* Receive list placement counters. */
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6628
/* Driver watchdog, fired every tp->timer_offset jiffies; it always
 * rearms itself at the end (even when irq_sync forces it to skip the
 * body).  Visible duties:
 *  - for chips without TAGGED status, re-kick the interrupt or the
 *    coalescing engine to work around the mailbox race, and schedule
 *    the reset task if the write DMA engine has stopped;
 *  - once per second (timer_counter), fetch statistics on 5705-plus
 *    chips and poll/refresh link state per the PHY flavor;
 *  - on the asf_counter period, send a keep-alive event to the ASF
 *    firmware (see the heartbeat comment below).
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* irq_sync set: skip all hardware access but keep the timer
         * alive so normal operation resumes once the sync ends.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                /* Write DMA engine stopped: hand off to the reset task.
                 * RESTART_TIMER presumably tells that task to rearm the
                 * timer, since we return without doing so here.
                 */
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        /* Detect a link change via MI interrupt or the
                         * link-state-changed status bit, as configured.
                         */
                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Link up but state changed, or link down with
                         * sync/signal present: renegotiate.
                         */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                /* Bounce the port mode bits first unless
                                 * serdes autoneg is still in progress.
                                 */
                                if (!tp->serdes_counter) {
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                        u32 val;

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
                        /* Ring the RX CPU event doorbell (bit 14) so the
                         * firmware notices the command in SRAM.
                         * NOTE(review): bit 14 semantics inferred from
                         * usage only -- confirm against firmware docs.
                         */
                        val = tr32(GRC_RX_CPU_EVENT);
                        val |= (1 << 14);
                        tw32(GRC_RX_CPU_EVENT, val);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
6748
6749 static int tg3_request_irq(struct tg3 *tp)
6750 {
6751         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6752         unsigned long flags;
6753         struct net_device *dev = tp->dev;
6754
6755         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6756                 fn = tg3_msi;
6757                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6758                         fn = tg3_msi_1shot;
6759                 flags = IRQF_SAMPLE_RANDOM;
6760         } else {
6761                 fn = tg3_interrupt;
6762                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6763                         fn = tg3_interrupt_tagged;
6764                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6765         }
6766         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6767 }
6768
6769 static int tg3_test_interrupt(struct tg3 *tp)
6770 {
6771         struct net_device *dev = tp->dev;
6772         int err, i;
6773         u32 int_mbox = 0;
6774
6775         if (!netif_running(dev))
6776                 return -ENODEV;
6777
6778         tg3_disable_ints(tp);
6779
6780         free_irq(tp->pdev->irq, dev);
6781
6782         err = request_irq(tp->pdev->irq, tg3_test_isr,
6783                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6784         if (err)
6785                 return err;
6786
6787         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6788         tg3_enable_ints(tp);
6789
6790         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6791                HOSTCC_MODE_NOW);
6792
6793         for (i = 0; i < 5; i++) {
6794                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6795                                         TG3_64BIT_REG_LOW);
6796                 if (int_mbox != 0)
6797                         break;
6798                 msleep(10);
6799         }
6800
6801         tg3_disable_ints(tp);
6802
6803         free_irq(tp->pdev->irq, dev);
6804
6805         err = tg3_request_irq(tp);
6806
6807         if (err)
6808                 return err;
6809
6810         if (int_mbox != 0)
6811                 return 0;
6812
6813         return -EIO;
6814 }
6815
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test unless we are actually running in MSI mode. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the saved PCI command word, SERR bit included. */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        /* Release the MSI vector and re-request a legacy INTx line. */
        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        /* If reinit failed, drop the IRQ too; the caller sees the error. */
        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
6876
6877 static int tg3_open(struct net_device *dev)
6878 {
6879         struct tg3 *tp = netdev_priv(dev);
6880         int err;
6881
6882         tg3_full_lock(tp, 0);
6883
6884         err = tg3_set_power_state(tp, PCI_D0);
6885         if (err)
6886                 return err;
6887
6888         tg3_disable_ints(tp);
6889         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6890
6891         tg3_full_unlock(tp);
6892
6893         /* The placement of this call is tied
6894          * to the setup and use of Host TX descriptors.
6895          */
6896         err = tg3_alloc_consistent(tp);
6897         if (err)
6898                 return err;
6899
6900         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6901             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6902             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6903             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6904               (tp->pdev_peer == tp->pdev))) {
6905                 /* All MSI supporting chips should support tagged
6906                  * status.  Assert that this is the case.
6907                  */
6908                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6909                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6910                                "Not using MSI.\n", tp->dev->name);
6911                 } else if (pci_enable_msi(tp->pdev) == 0) {
6912                         u32 msi_mode;
6913
6914                         msi_mode = tr32(MSGINT_MODE);
6915                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6916                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6917                 }
6918         }
6919         err = tg3_request_irq(tp);
6920
6921         if (err) {
6922                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6923                         pci_disable_msi(tp->pdev);
6924                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6925                 }
6926                 tg3_free_consistent(tp);
6927                 return err;
6928         }
6929
6930         tg3_full_lock(tp, 0);
6931
6932         err = tg3_init_hw(tp, 1);
6933         if (err) {
6934                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6935                 tg3_free_rings(tp);
6936         } else {
6937                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6938                         tp->timer_offset = HZ;
6939                 else
6940                         tp->timer_offset = HZ / 10;
6941
6942                 BUG_ON(tp->timer_offset > HZ);
6943                 tp->timer_counter = tp->timer_multiplier =
6944                         (HZ / tp->timer_offset);
6945                 tp->asf_counter = tp->asf_multiplier =
6946                         ((HZ / tp->timer_offset) * 2);
6947
6948                 init_timer(&tp->timer);
6949                 tp->timer.expires = jiffies + tp->timer_offset;
6950                 tp->timer.data = (unsigned long) tp;
6951                 tp->timer.function = tg3_timer;
6952         }
6953
6954         tg3_full_unlock(tp);
6955
6956         if (err) {
6957                 free_irq(tp->pdev->irq, dev);
6958                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6959                         pci_disable_msi(tp->pdev);
6960                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6961                 }
6962                 tg3_free_consistent(tp);
6963                 return err;
6964         }
6965
6966         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6967                 err = tg3_test_msi(tp);
6968
6969                 if (err) {
6970                         tg3_full_lock(tp, 0);
6971
6972                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6973                                 pci_disable_msi(tp->pdev);
6974                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6975                         }
6976                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6977                         tg3_free_rings(tp);
6978                         tg3_free_consistent(tp);
6979
6980                         tg3_full_unlock(tp);
6981
6982                         return err;
6983                 }
6984
6985                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6986                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6987                                 u32 val = tr32(0x7c04);
6988
6989                                 tw32(0x7c04, val | (1 << 29));
6990                         }
6991                 }
6992         }
6993
6994         tg3_full_lock(tp, 0);
6995
6996         add_timer(&tp->timer);
6997         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6998         tg3_enable_ints(tp);
6999
7000         tg3_full_unlock(tp);
7001
7002         netif_start_queue(dev);
7003
7004         return 0;
7005 }
7006
7007 #if 0
7008 /*static*/ void tg3_dump_state(struct tg3 *tp)
7009 {
7010         u32 val32, val32_2, val32_3, val32_4, val32_5;
7011         u16 val16;
7012         int i;
7013
7014         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7015         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7016         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7017                val16, val32);
7018
7019         /* MAC block */
7020         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7021                tr32(MAC_MODE), tr32(MAC_STATUS));
7022         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7023                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7024         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7025                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7026         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7027                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7028
7029         /* Send data initiator control block */
7030         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7031                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7032         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7033                tr32(SNDDATAI_STATSCTRL));
7034
7035         /* Send data completion control block */
7036         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7037
7038         /* Send BD ring selector block */
7039         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7040                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7041
7042         /* Send BD initiator control block */
7043         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7044                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7045
7046         /* Send BD completion control block */
7047         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7048
7049         /* Receive list placement control block */
7050         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7051                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7052         printk("       RCVLPC_STATSCTRL[%08x]\n",
7053                tr32(RCVLPC_STATSCTRL));
7054
7055         /* Receive data and receive BD initiator control block */
7056         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7057                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7058
7059         /* Receive data completion control block */
7060         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7061                tr32(RCVDCC_MODE));
7062
7063         /* Receive BD initiator control block */
7064         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7065                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7066
7067         /* Receive BD completion control block */
7068         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7069                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7070
7071         /* Receive list selector control block */
7072         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7073                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7074
7075         /* Mbuf cluster free block */
7076         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7077                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7078
7079         /* Host coalescing control block */
7080         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7081                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7082         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7083                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7084                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7085         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7086                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7087                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7088         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7089                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7090         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7091                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7092
7093         /* Memory arbiter control block */
7094         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7095                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7096
7097         /* Buffer manager control block */
7098         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7099                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7100         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7101                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7102         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7103                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7104                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7105                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7106
7107         /* Read DMA control block */
7108         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7109                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7110
7111         /* Write DMA control block */
7112         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7113                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7114
7115         /* DMA completion block */
7116         printk("DEBUG: DMAC_MODE[%08x]\n",
7117                tr32(DMAC_MODE));
7118
7119         /* GRC block */
7120         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7121                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7122         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7123                tr32(GRC_LOCAL_CTRL));
7124
7125         /* TG3_BDINFOs */
7126         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7127                tr32(RCVDBDI_JUMBO_BD + 0x0),
7128                tr32(RCVDBDI_JUMBO_BD + 0x4),
7129                tr32(RCVDBDI_JUMBO_BD + 0x8),
7130                tr32(RCVDBDI_JUMBO_BD + 0xc));
7131         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7132                tr32(RCVDBDI_STD_BD + 0x0),
7133                tr32(RCVDBDI_STD_BD + 0x4),
7134                tr32(RCVDBDI_STD_BD + 0x8),
7135                tr32(RCVDBDI_STD_BD + 0xc));
7136         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7137                tr32(RCVDBDI_MINI_BD + 0x0),
7138                tr32(RCVDBDI_MINI_BD + 0x4),
7139                tr32(RCVDBDI_MINI_BD + 0x8),
7140                tr32(RCVDBDI_MINI_BD + 0xc));
7141
7142         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7143         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7144         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7145         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7146         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7147                val32, val32_2, val32_3, val32_4);
7148
7149         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7150         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7151         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7152         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7153         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7154                val32, val32_2, val32_3, val32_4);
7155
7156         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7157         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7158         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7159         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7160         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7161         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7162                val32, val32_2, val32_3, val32_4, val32_5);
7163
7164         /* SW status block */
7165         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7166                tp->hw_status->status,
7167                tp->hw_status->status_tag,
7168                tp->hw_status->rx_jumbo_consumer,
7169                tp->hw_status->rx_consumer,
7170                tp->hw_status->rx_mini_consumer,
7171                tp->hw_status->idx[0].rx_producer,
7172                tp->hw_status->idx[0].tx_consumer);
7173
7174         /* SW statistics block */
7175         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7176                ((u32 *)tp->hw_stats)[0],
7177                ((u32 *)tp->hw_stats)[1],
7178                ((u32 *)tp->hw_stats)[2],
7179                ((u32 *)tp->hw_stats)[3]);
7180
7181         /* Mailboxes */
7182         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7183                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7184                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7185                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7186                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7187
7188         /* NIC side send descriptors. */
7189         for (i = 0; i < 6; i++) {
7190                 unsigned long txd;
7191
7192                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7193                         + (i * sizeof(struct tg3_tx_buffer_desc));
7194                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7195                        i,
7196                        readl(txd + 0x0), readl(txd + 0x4),
7197                        readl(txd + 0x8), readl(txd + 0xc));
7198         }
7199
7200         /* NIC side RX descriptors. */
7201         for (i = 0; i < 6; i++) {
7202                 unsigned long rxd;
7203
7204                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7205                         + (i * sizeof(struct tg3_rx_buffer_desc));
7206                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7207                        i,
7208                        readl(rxd + 0x0), readl(rxd + 0x4),
7209                        readl(rxd + 0x8), readl(rxd + 0xc));
7210                 rxd += (4 * sizeof(u32));
7211                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7212                        i,
7213                        readl(rxd + 0x0), readl(rxd + 0x4),
7214                        readl(rxd + 0x8), readl(rxd + 0xc));
7215         }
7216
7217         for (i = 0; i < 6; i++) {
7218                 unsigned long rxd;
7219
7220                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7221                         + (i * sizeof(struct tg3_rx_buffer_desc));
7222                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7223                        i,
7224                        readl(rxd + 0x0), readl(rxd + 0x4),
7225                        readl(rxd + 0x8), readl(rxd + 0xc));
7226                 rxd += (4 * sizeof(u32));
7227                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7228                        i,
7229                        readl(rxd + 0x0), readl(rxd + 0x4),
7230                        readl(rxd + 0x8), readl(rxd + 0xc));
7231         }
7232 }
7233 #endif
7234
7235 static struct net_device_stats *tg3_get_stats(struct net_device *);
7236 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7237
/* net_device ->stop (ifdown) handler: quiesce the device, reset the
 * chip, release all resources and put the chip into D3hot.
 * Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	netif_stop_queue(dev);

	/* Stop the periodic driver timer before touching the hardware. */
	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	/* Halt/reset the chip and free the rings under the full lock so no
	 * interrupt or timer path can observe the half-torn-down state.
	 */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot cumulative counters before the stats DMA memory is
	 * freed, so totals survive a close/open cycle (the accumulate
	 * helpers add hw counters on top of these *_prev snapshots).
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7287
7288 static inline unsigned long get_stat64(tg3_stat64_t *val)
7289 {
7290         unsigned long ret;
7291
7292 #if (BITS_PER_LONG == 32)
7293         ret = val->low;
7294 #else
7295         ret = ((u64)val->high << 32) | ((u64)val->low);
7296 #endif
7297         return ret;
7298 }
7299
/* Return the cumulative receive CRC error count.
 *
 * On 5700/5701 copper parts the count must be fetched from the PHY:
 * registers 0x1e/0x14 are Broadcom-specific -- presumably an
 * expansion/shadow select and the error counter; confirm against the
 * PHY datasheet.  The PHY counter clears on read, so it is accumulated
 * into tp->phy_crc_errors under tp->lock.  All other chips report CRC
 * errors via the MAC's rx_fcs_errors hardware statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7324
/* Compute one cumulative ethtool statistic: the snapshot taken at the
 * last close/reset (estats_prev) plus the live hardware counter, which
 * is cleared across chip resets.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Refresh and return the full ethtool statistics block for the device.
 * Falls back to the saved snapshot if the hardware stats DMA area is
 * not (or no longer) allocated, e.g. while the interface is down.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive-side MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side MAC counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive list placement state machine counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send data initiator counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7416
/* net_device ->get_stats handler: map the chip's hardware MAC counters
 * onto the generic net_device_stats fields.
 *
 * Each field is the snapshot saved at the last close/reset
 * (net_stats_prev) plus the live hardware counter; the hardware
 * counters clear across chip resets.  If the stats DMA block is not
 * allocated (interface down), the saved snapshot is returned as-is.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* The hardware counts unicast/multicast/broadcast separately. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from a PHY counter on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7476
/* Compute the standard reflected CRC-32 (IEEE 802.3 polynomial,
 * bit-reversed form 0xedb88320, init 0xffffffff, final inversion) of
 * @buf.  Used to hash multicast addresses into the MAC hash registers;
 * called only on 6-byte addresses, so the bitwise loop is fine.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}

	return ~crc;
}
7501
7502 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7503 {
7504         /* accept or reject all multicast frames */
7505         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7506         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7507         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7508         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7509 }
7510
/* Program the receive filter (promiscuous/allmulti/multicast hash and
 * VLAN tag stripping) from dev->flags and the device multicast list.
 * Caller must hold the full tp lock; the interface must be running.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the top 7 bits of the inverted CRC:
			 * bits 6:5 select one of the four 32-bit hash
			 * registers, bits 4:0 select the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch (and flush) the RX mode register if it changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7574
/* net_device ->set_multicast_list entry point: take the full lock and
 * reprogram the receive filter.  A no-op while the device is down; the
 * filter is programmed from scratch on the next open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7586
/* Size of the ethtool register dump: a fixed 32 KB window. */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool ->get_regs_len: the dump size is constant for all chips. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7593
/* ethtool ->get_regs: copy selected chip register ranges into the
 * caller's TG3_REGDUMP_LEN buffer, at the same offsets the registers
 * occupy in the chip's address map (unsampled gaps stay zero).
 * Skipped entirely while the PHY is powered down, since register
 * reads would be unreliable then.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Helpers: position the output cursor at the register's own offset in
 * the dump buffer, then copy one register (or a run of registers).
 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only meaningful when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7666
/* ethtool ->get_eeprom_len: report the NVRAM size probed at init. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
7673
7674 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7675 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7676
/* ethtool ->get_eeprom: read @eeprom->len bytes of NVRAM starting at
 * @eeprom->offset into @data.
 *
 * NVRAM is only readable in aligned 4-byte words, so an unaligned head
 * and tail are handled by reading the bordering word and copying out
 * just the requested bytes.  eeprom->len is updated incrementally so
 * that on a partial failure it reflects how much was actually copied.
 * NOTE(review): words are stored in the user buffer after
 * cpu_to_le32(), i.e. the dump is little-endian on all hosts --
 * presumably matching the on-chip byte order; confirm before changing.
 *
 * Returns 0 on success or a negative errno from tg3_nvram_read().
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is not accessible while the device is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7738
7739 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7740
7741 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7742 {
7743         struct tg3 *tp = netdev_priv(dev);
7744         int ret;
7745         u32 offset, len, b_offset, odd_len, start, end;
7746         u8 *buf;
7747
7748         if (tp->link_config.phy_is_low_power)
7749                 return -EAGAIN;
7750
7751         if (eeprom->magic != TG3_EEPROM_MAGIC)
7752                 return -EINVAL;
7753
7754         offset = eeprom->offset;
7755         len = eeprom->len;
7756
7757         if ((b_offset = (offset & 3))) {
7758                 /* adjustments to start on required 4 byte boundary */
7759                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7760                 if (ret)
7761                         return ret;
7762                 start = cpu_to_le32(start);
7763                 len += b_offset;
7764                 offset &= ~3;
7765                 if (len < 4)
7766                         len = 4;
7767         }
7768
7769         odd_len = 0;
7770         if (len & 3) {
7771                 /* adjustments to end on required 4 byte boundary */
7772                 odd_len = 1;
7773                 len = (len + 3) & ~3;
7774                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7775                 if (ret)
7776                         return ret;
7777                 end = cpu_to_le32(end);
7778         }
7779
7780         buf = data;
7781         if (b_offset || odd_len) {
7782                 buf = kmalloc(len, GFP_KERNEL);
7783                 if (buf == 0)
7784                         return -ENOMEM;
7785                 if (b_offset)
7786                         memcpy(buf, &start, 4);
7787                 if (odd_len)
7788                         memcpy(buf+len-4, &end, 4);
7789                 memcpy(buf + b_offset, data, eeprom->len);
7790         }
7791
7792         ret = tg3_nvram_write_block(tp, offset, len, buf);
7793
7794         if (buf != data)
7795                 kfree(buf);
7796
7797         return ret;
7798 }
7799
7800 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7801 {
7802         struct tg3 *tp = netdev_priv(dev);
7803
7804         cmd->supported = (SUPPORTED_Autoneg);
7805
7806         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7807                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7808                                    SUPPORTED_1000baseT_Full);
7809
7810         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7811                 cmd->supported |= (SUPPORTED_100baseT_Half |
7812                                   SUPPORTED_100baseT_Full |
7813                                   SUPPORTED_10baseT_Half |
7814                                   SUPPORTED_10baseT_Full |
7815                                   SUPPORTED_MII);
7816                 cmd->port = PORT_TP;
7817         } else {
7818                 cmd->supported |= SUPPORTED_FIBRE;
7819                 cmd->port = PORT_FIBRE;
7820         }
7821
7822         cmd->advertising = tp->link_config.advertising;
7823         if (netif_running(dev)) {
7824                 cmd->speed = tp->link_config.active_speed;
7825                 cmd->duplex = tp->link_config.active_duplex;
7826         }
7827         cmd->phy_address = PHY_ADDR;
7828         cmd->transceiver = 0;
7829         cmd->autoneg = tp->link_config.autoneg;
7830         cmd->maxtxpkt = 0;
7831         cmd->maxrxpkt = 0;
7832         return 0;
7833 }
7834
7835 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7836 {
7837         struct tg3 *tp = netdev_priv(dev);
7838
7839         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7840                 /* These are the only valid advertisement bits allowed.  */
7841                 if (cmd->autoneg == AUTONEG_ENABLE &&
7842                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7843                                           ADVERTISED_1000baseT_Full |
7844                                           ADVERTISED_Autoneg |
7845                                           ADVERTISED_FIBRE)))
7846                         return -EINVAL;
7847                 /* Fiber can only do SPEED_1000.  */
7848                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7849                          (cmd->speed != SPEED_1000))
7850                         return -EINVAL;
7851         /* Copper cannot force SPEED_1000.  */
7852         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7853                    (cmd->speed == SPEED_1000))
7854                 return -EINVAL;
7855         else if ((cmd->speed == SPEED_1000) &&
7856                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7857                 return -EINVAL;
7858
7859         tg3_full_lock(tp, 0);
7860
7861         tp->link_config.autoneg = cmd->autoneg;
7862         if (cmd->autoneg == AUTONEG_ENABLE) {
7863                 tp->link_config.advertising = cmd->advertising;
7864                 tp->link_config.speed = SPEED_INVALID;
7865                 tp->link_config.duplex = DUPLEX_INVALID;
7866         } else {
7867                 tp->link_config.advertising = 0;
7868                 tp->link_config.speed = cmd->speed;
7869                 tp->link_config.duplex = cmd->duplex;
7870         }
7871
7872         if (netif_running(dev))
7873                 tg3_setup_phy(tp, 1);
7874
7875         tg3_full_unlock(tp);
7876
7877         return 0;
7878 }
7879
7880 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7881 {
7882         struct tg3 *tp = netdev_priv(dev);
7883
7884         strcpy(info->driver, DRV_MODULE_NAME);
7885         strcpy(info->version, DRV_MODULE_VERSION);
7886         strcpy(info->fw_version, tp->fw_ver);
7887         strcpy(info->bus_info, pci_name(tp->pdev));
7888 }
7889
7890 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7891 {
7892         struct tg3 *tp = netdev_priv(dev);
7893
7894         wol->supported = WAKE_MAGIC;
7895         wol->wolopts = 0;
7896         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7897                 wol->wolopts = WAKE_MAGIC;
7898         memset(&wol->sopass, 0, sizeof(wol->sopass));
7899 }
7900
7901 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7902 {
7903         struct tg3 *tp = netdev_priv(dev);
7904
7905         if (wol->wolopts & ~WAKE_MAGIC)
7906                 return -EINVAL;
7907         if ((wol->wolopts & WAKE_MAGIC) &&
7908             tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
7909             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7910                 return -EINVAL;
7911
7912         spin_lock_bh(&tp->lock);
7913         if (wol->wolopts & WAKE_MAGIC)
7914                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7915         else
7916                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7917         spin_unlock_bh(&tp->lock);
7918
7919         return 0;
7920 }
7921
/* ethtool ->get_msglevel: return the driver's message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
7927
/* ethtool ->set_msglevel: set the driver's message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
7933
#if TG3_TSO_SUPPORT != 0
/* ethtool ->set_tso: toggle TCP segmentation offload.
 * Non-TSO-capable chips may only have the feature turned off; chips
 * with the HW_TSO_2 engine also keep NETIF_F_TSO6 in step with the
 * requested state before the generic flag update.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
		return value ? -EINVAL : 0;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
		if (value)
			dev->features |= NETIF_F_TSO6;
		else
			dev->features &= ~NETIF_F_TSO6;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
7953
/* ethtool nway_reset: restart link autonegotiation.
 * Returns -EAGAIN if the interface is down and -EINVAL on SERDES PHYs
 * (no MII autonegotiation) or when autoneg is not currently enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice and the first result is
	 * discarded.  This looks like a deliberate dummy read to flush
	 * stale/latched PHY state before the read that is actually
	 * checked -- confirm before removing the duplicate.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Kick off a fresh autonegotiation cycle. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7980
7981 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7982 {
7983         struct tg3 *tp = netdev_priv(dev);
7984
7985         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7986         ering->rx_mini_max_pending = 0;
7987         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7988                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7989         else
7990                 ering->rx_jumbo_max_pending = 0;
7991
7992         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7993
7994         ering->rx_pending = tp->rx_pending;
7995         ering->rx_mini_pending = 0;
7996         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7997                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7998         else
7999                 ering->rx_jumbo_pending = 0;
8000
8001         ering->tx_pending = tp->tx_pending;
8002 }
8003
/* ethtool set_ringparam: resize the RX/TX rings.
 * The interface is quiesced (netif stop before the full lock, which also
 * syncs the IRQ), the pending counts are updated, and the chip is
 * halted and re-initialized so the new ring sizes take effect.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Reject sizes the hardware rings cannot hold. */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can post at most 64 standard RX BDs; clamp to 63. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Reset and restart the chip with the new ring sizes. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8040
8041 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8042 {
8043         struct tg3 *tp = netdev_priv(dev);
8044
8045         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8046         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8047         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8048 }
8049
/* ethtool set_pauseparam: update flow-control settings.
 * The interface is quiesced, the pause flags are updated under the full
 * lock, and the chip is halted and re-initialized so the new pause
 * configuration is programmed into the MAC.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	if (netif_running(dev)) {
		/* Restart the chip so the new settings take effect. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8086
8087 static u32 tg3_get_rx_csum(struct net_device *dev)
8088 {
8089         struct tg3 *tp = netdev_priv(dev);
8090         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8091 }
8092
8093 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8094 {
8095         struct tg3 *tp = netdev_priv(dev);
8096
8097         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8098                 if (data != 0)
8099                         return -EINVAL;
8100                 return 0;
8101         }
8102
8103         spin_lock_bh(&tp->lock);
8104         if (data)
8105                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8106         else
8107                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8108         spin_unlock_bh(&tp->lock);
8109
8110         return 0;
8111 }
8112
8113 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8114 {
8115         struct tg3 *tp = netdev_priv(dev);
8116
8117         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8118                 if (data != 0)
8119                         return -EINVAL;
8120                 return 0;
8121         }
8122
8123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8124             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8125                 ethtool_op_set_tx_hw_csum(dev, data);
8126         else
8127                 ethtool_op_set_tx_csum(dev, data);
8128
8129         return 0;
8130 }
8131
8132 static int tg3_get_stats_count (struct net_device *dev)
8133 {
8134         return TG3_NUM_STATS;
8135 }
8136
8137 static int tg3_get_test_count (struct net_device *dev)
8138 {
8139         return TG3_NUM_TEST;
8140 }
8141
8142 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8143 {
8144         switch (stringset) {
8145         case ETH_SS_STATS:
8146                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8147                 break;
8148         case ETH_SS_TEST:
8149                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8150                 break;
8151         default:
8152                 WARN_ON(1);     /* we need a WARN() */
8153                 break;
8154         }
8155 }
8156
8157 static int tg3_phys_id(struct net_device *dev, u32 data)
8158 {
8159         struct tg3 *tp = netdev_priv(dev);
8160         int i;
8161
8162         if (!netif_running(tp->dev))
8163                 return -EAGAIN;
8164
8165         if (data == 0)
8166                 data = 2;
8167
8168         for (i = 0; i < (data * 2); i++) {
8169                 if ((i % 2) == 0)
8170                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8171                                            LED_CTRL_1000MBPS_ON |
8172                                            LED_CTRL_100MBPS_ON |
8173                                            LED_CTRL_10MBPS_ON |
8174                                            LED_CTRL_TRAFFIC_OVERRIDE |
8175                                            LED_CTRL_TRAFFIC_BLINK |
8176                                            LED_CTRL_TRAFFIC_LED);
8177
8178                 else
8179                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8180                                            LED_CTRL_TRAFFIC_OVERRIDE);
8181
8182                 if (msleep_interruptible(500))
8183                         break;
8184         }
8185         tw32(MAC_LED_CTRL, tp->led_ctrl);
8186         return 0;
8187 }
8188
8189 static void tg3_get_ethtool_stats (struct net_device *dev,
8190                                    struct ethtool_stats *estats, u64 *tmp_stats)
8191 {
8192         struct tg3 *tp = netdev_priv(dev);
8193         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8194 }
8195
8196 #define NVRAM_TEST_SIZE 0x100
8197 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8198
/* NVRAM self-test: read the leading block of NVRAM and verify checksums.
 * Handles both the standard EEPROM layout (magic TG3_EEPROM_MAGIC), which
 * carries CRC checksums at 0x10 and 0xfc, and selfboot-format NVRAM
 * (magic 0xa5xxxxxx), which uses a simple additive byte checksum.
 * Returns 0 on success, -EIO on read or checksum failure, -ENOMEM on OOM.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Decide how much to read based on the NVRAM format magic. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & 0xff000000) == 0xa5000000) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;	/* unrecognized selfboot sub-format: skip */
	} else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	/* Read the image word-by-word, storing it little-endian. */
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;	/* read failed; err holds the error code */

	/* Selfboot format */
	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* Additive byte checksum of the whole image must be zero. */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8264
8265 #define TG3_SERDES_TIMEOUT_SEC  2
8266 #define TG3_COPPER_TIMEOUT_SEC  6
8267
8268 static int tg3_test_link(struct tg3 *tp)
8269 {
8270         int i, max;
8271
8272         if (!netif_running(tp->dev))
8273                 return -ENODEV;
8274
8275         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8276                 max = TG3_SERDES_TIMEOUT_SEC;
8277         else
8278                 max = TG3_COPPER_TIMEOUT_SEC;
8279
8280         for (i = 0; i < max; i++) {
8281                 if (netif_carrier_ok(tp->dev))
8282                         return 0;
8283
8284                 if (msleep_interruptible(1000))
8285                         break;
8286         }
8287
8288         return -EIO;
8289 }
8290
/* Only test the commonly used registers */
/* Register self-test: for each table entry applicable to this chip
 * (5705-class vs. not, 5788 exceptions), write all-zeros and then
 * all-ones through the write mask, verifying after each write that the
 * read-only bits (read_mask) are unchanged and the read/write bits took
 * the written value.  The original register contents are restored after
 * each entry.  Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* chip-applicability flags below */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
		u32 read_mask;	/* read-only bits that must not change */
		u32 write_mask;	/* read/write bits that must take writes */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, 0x0000,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, 0x0000,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },	/* terminator */
	};

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		is_5705 = 1;
	else
		is_5705 = 0;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	/* Restore the register before reporting the failing offset. */
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
8503
8504 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8505 {
8506         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8507         int i;
8508         u32 j;
8509
8510         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8511                 for (j = 0; j < len; j += 4) {
8512                         u32 val;
8513
8514                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8515                         tg3_read_mem(tp, offset + j, &val);
8516                         if (val != test_pattern[i])
8517                                 return -EIO;
8518                 }
8519         }
8520         return 0;
8521 }
8522
8523 static int tg3_test_memory(struct tg3 *tp)
8524 {
8525         static struct mem_entry {
8526                 u32 offset;
8527                 u32 len;
8528         } mem_tbl_570x[] = {
8529                 { 0x00000000, 0x00b50},
8530                 { 0x00002000, 0x1c000},
8531                 { 0xffffffff, 0x00000}
8532         }, mem_tbl_5705[] = {
8533                 { 0x00000100, 0x0000c},
8534                 { 0x00000200, 0x00008},
8535                 { 0x00004000, 0x00800},
8536                 { 0x00006000, 0x01000},
8537                 { 0x00008000, 0x02000},
8538                 { 0x00010000, 0x0e000},
8539                 { 0xffffffff, 0x00000}
8540         }, mem_tbl_5755[] = {
8541                 { 0x00000200, 0x00008},
8542                 { 0x00004000, 0x00800},
8543                 { 0x00006000, 0x00800},
8544                 { 0x00008000, 0x02000},
8545                 { 0x00010000, 0x0c000},
8546                 { 0xffffffff, 0x00000}
8547         };
8548         struct mem_entry *mem_tbl;
8549         int err = 0;
8550         int i;
8551
8552         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8553                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8554                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8555                         mem_tbl = mem_tbl_5755;
8556                 else
8557                         mem_tbl = mem_tbl_5705;
8558         } else
8559                 mem_tbl = mem_tbl_570x;
8560
8561         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8562                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8563                     mem_tbl[i].len)) != 0)
8564                         break;
8565         }
8566
8567         return err;
8568 }
8569
8570 #define TG3_MAC_LOOPBACK        0
8571 #define TG3_PHY_LOOPBACK        1
8572
/* Loopback self-test: transmit one 1514-byte frame with the chip in MAC
 * or PHY loopback mode and verify it arrives back intact on the standard
 * RX ring.  Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM
 * if the test skb cannot be allocated, -EIO on any data mismatch or
 * timeout.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		/* Put the PHY in loopback at its highest supported speed. */
		val = BMCR_LOOPBACK | BMCR_FULLDPLX;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			val |= BMCR_SPEED100;
		else
			val |= BMCR_SPEED1000;

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: our MAC address, a zeroed type field,
	 * then a counting byte pattern in the payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a host-coalescing pass so the producer index is current
	 * before the frame is sent.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Queue the single frame and ring the TX doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the received descriptor: must come from the standard
	 * ring, carry no (real) errors, and match the transmitted length.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the received payload byte-for-byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8721
8722 #define TG3_MAC_LOOPBACK_FAILED         1
8723 #define TG3_PHY_LOOPBACK_FAILED         2
8724 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8725                                          TG3_PHY_LOOPBACK_FAILED)
8726
8727 static int tg3_test_loopback(struct tg3 *tp)
8728 {
8729         int err = 0;
8730
8731         if (!netif_running(tp->dev))
8732                 return TG3_LOOPBACK_FAILED;
8733
8734         err = tg3_reset_hw(tp, 1);
8735         if (err)
8736                 return TG3_LOOPBACK_FAILED;
8737
8738         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8739                 err |= TG3_MAC_LOOPBACK_FAILED;
8740         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8741                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8742                         err |= TG3_PHY_LOOPBACK_FAILED;
8743         }
8744
8745         return err;
8746 }
8747
/* ethtool self-test entry point.
 *
 * Always runs the NVRAM and link tests.  When ETH_TEST_FL_OFFLINE is
 * requested, additionally halts the chip and runs the register, memory,
 * loopback and interrupt tests, then restores the hardware state.
 * Each data[] slot is non-zero on failure:
 *   data[0]=nvram  data[1]=link  data[2]=registers
 *   data[3]=memory data[4]=loopback mask  data[5]=interrupt
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Bring the device to full power before touching it. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D0);

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        if (tg3_test_link(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                /* Quiesce the chip and its on-board CPUs before the
                 * destructive offline tests.  The TX CPU only exists on
                 * pre-5705 parts.
                 */
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }
                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }
                /* data[4] carries the loopback failure mask directly. */
                if ((data[4] = tg3_test_loopback(tp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                tg3_full_unlock(tp);

                /* The interrupt test must run without the full lock held. */
                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[5] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Reset the chip; reinitialize it if the interface is up. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                        if (!tg3_restart_hw(tp, 1))
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);
        }
        /* Return to low-power state if that is where we started. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D3hot);

}
8820
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 *
 * PHY register access is serialized with tp->lock.  SerDes boards have
 * no MII PHY, so these requests fall through to -EOPNOTSUPP there;
 * while in low-power state the PHY is unreachable and -EAGAIN is
 * returned.  All other ioctls return -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = PHY_ADDR;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (tp->link_config.phy_is_low_power)
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&tp->lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */

                /* Writing PHY registers is a privileged operation. */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (tp->link_config.phy_is_low_power)
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&tp->lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
8872
8873 #if TG3_VLAN_TAG_USED
8874 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8875 {
8876         struct tg3 *tp = netdev_priv(dev);
8877
8878         if (netif_running(dev))
8879                 tg3_netif_stop(tp);
8880
8881         tg3_full_lock(tp, 0);
8882
8883         tp->vlgrp = grp;
8884
8885         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8886         __tg3_set_rx_mode(dev);
8887
8888         tg3_full_unlock(tp);
8889
8890         if (netif_running(dev))
8891                 tg3_netif_start(tp);
8892 }
8893
8894 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8895 {
8896         struct tg3 *tp = netdev_priv(dev);
8897
8898         if (netif_running(dev))
8899                 tg3_netif_stop(tp);
8900
8901         tg3_full_lock(tp, 0);
8902         if (tp->vlgrp)
8903                 tp->vlgrp->vlan_devices[vid] = NULL;
8904         tg3_full_unlock(tp);
8905
8906         if (netif_running(dev))
8907                 tg3_netif_start(tp);
8908 }
8909 #endif
8910
8911 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8912 {
8913         struct tg3 *tp = netdev_priv(dev);
8914
8915         memcpy(ec, &tp->coal, sizeof(*ec));
8916         return 0;
8917 }
8918
8919 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8920 {
8921         struct tg3 *tp = netdev_priv(dev);
8922         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8923         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8924
8925         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8926                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8927                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8928                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8929                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8930         }
8931
8932         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8933             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8934             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8935             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8936             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8937             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8938             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8939             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8940             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8941             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8942                 return -EINVAL;
8943
8944         /* No rx interrupts will be generated if both are zero */
8945         if ((ec->rx_coalesce_usecs == 0) &&
8946             (ec->rx_max_coalesced_frames == 0))
8947                 return -EINVAL;
8948
8949         /* No tx interrupts will be generated if both are zero */
8950         if ((ec->tx_coalesce_usecs == 0) &&
8951             (ec->tx_max_coalesced_frames == 0))
8952                 return -EINVAL;
8953
8954         /* Only copy relevant parameters, ignore all others. */
8955         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8956         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8957         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8958         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8959         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8960         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8961         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8962         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8963         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8964
8965         if (netif_running(dev)) {
8966                 tg3_full_lock(tp, 0);
8967                 __tg3_set_coalesce(tp, &tp->coal);
8968                 tg3_full_unlock(tp);
8969         }
8970         return 0;
8971 }
8972
/* ethtool operations table for tg3 devices.  The TSO hooks are only
 * compiled in when the kernel provides NETIF_F_TSO (TG3_TSO_SUPPORT).
 */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .get_rx_csum            = tg3_get_rx_csum,
        .set_rx_csum            = tg3_set_rx_csum,
        .get_tx_csum            = ethtool_op_get_tx_csum,
        .set_tx_csum            = tg3_set_tx_csum,
        .get_sg                 = ethtool_op_get_sg,
        .set_sg                 = ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
        .get_tso                = ethtool_op_get_tso,
        .set_tso                = tg3_set_tso,
#endif
        .self_test_count        = tg3_get_test_count,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .phys_id                = tg3_phys_id,
        .get_stats_count        = tg3_get_stats_count,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_perm_addr          = ethtool_op_get_perm_addr,
};
9012
9013 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9014 {
9015         u32 cursize, val, magic;
9016
9017         tp->nvram_size = EEPROM_CHIP_SIZE;
9018
9019         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9020                 return;
9021
9022         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
9023                 return;
9024
9025         /*
9026          * Size the chip by reading offsets at increasing powers of two.
9027          * When we encounter our validation signature, we know the addressing
9028          * has wrapped around, and thus have our chip size.
9029          */
9030         cursize = 0x10;
9031
9032         while (cursize < tp->nvram_size) {
9033                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9034                         return;
9035
9036                 if (val == magic)
9037                         break;
9038
9039                 cursize <<= 1;
9040         }
9041
9042         tp->nvram_size = cursize;
9043 }
9044
9045 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9046 {
9047         u32 val;
9048
9049         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9050                 return;
9051
9052         /* Selfboot format */
9053         if (val != TG3_EEPROM_MAGIC) {
9054                 tg3_get_eeprom_size(tp);
9055                 return;
9056         }
9057
9058         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9059                 if (val != 0) {
9060                         tp->nvram_size = (val >> 16) * 1024;
9061                         return;
9062                 }
9063         }
9064         tp->nvram_size = 0x20000;
9065 }
9066
/* Decode NVRAM_CFG1 to learn what kind of NVRAM device is attached
 * (vendor, page size, buffered/unbuffered) and record it in tp.
 * The detailed vendor decode only exists on 5750/5780-class chips;
 * older parts default to a buffered Atmel AT45DB0X1B.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tp->tg3_flags2 |= TG3_FLG2_FLASH;
        }
        else {
                /* No flash interface: force non-bypass EEPROM access. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                        case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_ATMEL_EEPROM:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ST:
                                tp->nvram_jedecnum = JEDEC_ST;
                                tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_SAIFUN:
                                tp->nvram_jedecnum = JEDEC_SAIFUN;
                                tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_SST_SMALL:
                        case FLASH_VENDOR_SST_LARGE:
                                tp->nvram_jedecnum = JEDEC_SST;
                                tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                                break;
                }
        }
        else {
                /* Pre-5750 chips: assume buffered Atmel flash. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
        }
}
9119
/* 5752 variant of NVRAM detection: decode the 5752-specific vendor and
 * page-size fields of NVRAM_CFG1 and record the device properties in tp.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
        }

        if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
                /* Flash parts report their page size in NVRAM_CFG1. */
                switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
                        case FLASH_5752PAGE_SIZE_256:
                                tp->nvram_pagesize = 256;
                                break;
                        case FLASH_5752PAGE_SIZE_512:
                                tp->nvram_pagesize = 512;
                                break;
                        case FLASH_5752PAGE_SIZE_1K:
                                tp->nvram_pagesize = 1024;
                                break;
                        case FLASH_5752PAGE_SIZE_2K:
                                tp->nvram_pagesize = 2048;
                                break;
                        case FLASH_5752PAGE_SIZE_4K:
                                tp->nvram_pagesize = 4096;
                                break;
                        case FLASH_5752PAGE_SIZE_264:
                                tp->nvram_pagesize = 264;
                                break;
                }
        }
        else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
9180
/* 5755 variant of NVRAM detection: decode the vendor field of
 * NVRAM_CFG1 and record device type, buffering and page size in tp.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                        /* EEPROM: force non-bypass access mode. */
                        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                        tw32(NVRAM_CFG1, nvcfg1);
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                case FLASH_5755VENDOR_ATMEL_FLASH_4:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }
}
9221
/* 5787 variant of NVRAM detection: decode the vendor field of
 * NVRAM_CFG1 and record device type, buffering and page size in tp.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                        /* EEPROM: force non-bypass access mode. */
                        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                        tw32(NVRAM_CFG1, nvcfg1);
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }
}
9259
9260 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9261 static void __devinit tg3_nvram_init(struct tg3 *tp)
9262 {
9263         int j;
9264
9265         tw32_f(GRC_EEPROM_ADDR,
9266              (EEPROM_ADDR_FSM_RESET |
9267               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9268                EEPROM_ADDR_CLKPERD_SHIFT)));
9269
9270         /* XXX schedule_timeout() ... */
9271         for (j = 0; j < 100; j++)
9272                 udelay(10);
9273
9274         /* Enable seeprom accesses. */
9275         tw32_f(GRC_LOCAL_CTRL,
9276              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9277         udelay(100);
9278
9279         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9280             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9281                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9282
9283                 if (tg3_nvram_lock(tp)) {
9284                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9285                                "tg3_nvram_init failed.\n", tp->dev->name);
9286                         return;
9287                 }
9288                 tg3_enable_nvram_access(tp);
9289
9290                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9291                         tg3_get_5752_nvram_info(tp);
9292                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9293                         tg3_get_5755_nvram_info(tp);
9294                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9295                         tg3_get_5787_nvram_info(tp);
9296                 else
9297                         tg3_get_nvram_info(tp);
9298
9299                 tg3_get_nvram_size(tp);
9300
9301                 tg3_disable_nvram_access(tp);
9302                 tg3_nvram_unlock(tp);
9303
9304         } else {
9305                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9306
9307                 tg3_get_eeprom_size(tp);
9308         }
9309 }
9310
9311 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9312                                         u32 offset, u32 *val)
9313 {
9314         u32 tmp;
9315         int i;
9316
9317         if (offset > EEPROM_ADDR_ADDR_MASK ||
9318             (offset % 4) != 0)
9319                 return -EINVAL;
9320
9321         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9322                                         EEPROM_ADDR_DEVID_MASK |
9323                                         EEPROM_ADDR_READ);
9324         tw32(GRC_EEPROM_ADDR,
9325              tmp |
9326              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9327              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9328               EEPROM_ADDR_ADDR_MASK) |
9329              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9330
9331         for (i = 0; i < 10000; i++) {
9332                 tmp = tr32(GRC_EEPROM_ADDR);
9333
9334                 if (tmp & EEPROM_ADDR_COMPLETE)
9335                         break;
9336                 udelay(100);
9337         }
9338         if (!(tmp & EEPROM_ADDR_COMPLETE))
9339                 return -EBUSY;
9340
9341         *val = tr32(GRC_EEPROM_DATA);
9342         return 0;
9343 }
9344
9345 #define NVRAM_CMD_TIMEOUT 10000
9346
9347 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9348 {
9349         int i;
9350
9351         tw32(NVRAM_CMD, nvram_cmd);
9352         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9353                 udelay(10);
9354                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9355                         udelay(10);
9356                         break;
9357                 }
9358         }
9359         if (i == NVRAM_CMD_TIMEOUT) {
9360                 return -EBUSY;
9361         }
9362         return 0;
9363 }
9364
9365 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9366 {
9367         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9368             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9369             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9370             (tp->nvram_jedecnum == JEDEC_ATMEL))
9371
9372                 addr = ((addr / tp->nvram_pagesize) <<
9373                         ATMEL_AT45DB0X1B_PAGE_POS) +
9374                        (addr % tp->nvram_pagesize);
9375
9376         return addr;
9377 }
9378
9379 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9380 {
9381         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9382             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9383             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9384             (tp->nvram_jedecnum == JEDEC_ATMEL))
9385
9386                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9387                         tp->nvram_pagesize) +
9388                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9389
9390         return addr;
9391 }
9392
9393 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9394 {
9395         int ret;
9396
9397         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9398                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9399
9400         offset = tg3_nvram_phys_addr(tp, offset);
9401
9402         if (offset > NVRAM_ADDR_MSK)
9403                 return -EINVAL;
9404
9405         ret = tg3_nvram_lock(tp);
9406         if (ret)
9407                 return ret;
9408
9409         tg3_enable_nvram_access(tp);
9410
9411         tw32(NVRAM_ADDR, offset);
9412         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9413                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9414
9415         if (ret == 0)
9416                 *val = swab32(tr32(NVRAM_RDDATA));
9417
9418         tg3_disable_nvram_access(tp);
9419
9420         tg3_nvram_unlock(tp);
9421
9422         return ret;
9423 }
9424
9425 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9426 {
9427         int err;
9428         u32 tmp;
9429
9430         err = tg3_nvram_read(tp, offset, &tmp);
9431         *val = swab32(tmp);
9432         return err;
9433 }
9434
/* Write @len bytes from @buf to the serial EEPROM starting at @offset,
 * one 32-bit word at a time, via the GRC EEPROM state machine.
 * offset and len are assumed dword-aligned by the caller.
 * Returns 0 on success or -EBUSY if a word write never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr, data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

                /* Ack any previous completion, then program the
                 * address and start the write.
                 */
                val = tr32(GRC_EEPROM_ADDR);
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll for completion, 100us per try. */
                for (j = 0; j < 10000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        udelay(100);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
9476
9477 /* offset and length are dword aligned */
9478 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9479                 u8 *buf)
9480 {
9481         int ret = 0;
9482         u32 pagesize = tp->nvram_pagesize;
9483         u32 pagemask = pagesize - 1;
9484         u32 nvram_cmd;
9485         u8 *tmp;
9486
9487         tmp = kmalloc(pagesize, GFP_KERNEL);
9488         if (tmp == NULL)
9489                 return -ENOMEM;
9490
9491         while (len) {
9492                 int j;
9493                 u32 phy_addr, page_off, size;
9494
9495                 phy_addr = offset & ~pagemask;
9496
9497                 for (j = 0; j < pagesize; j += 4) {
9498                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9499                                                 (u32 *) (tmp + j))))
9500                                 break;
9501                 }
9502                 if (ret)
9503                         break;
9504
9505                 page_off = offset & pagemask;
9506                 size = pagesize;
9507                 if (len < size)
9508                         size = len;
9509
9510                 len -= size;
9511
9512                 memcpy(tmp + page_off, buf, size);
9513
9514                 offset = offset + (pagesize - page_off);
9515
9516                 tg3_enable_nvram_access(tp);
9517
9518                 /*
9519                  * Before we can erase the flash page, we need
9520                  * to issue a special "write enable" command.
9521                  */
9522                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9523
9524                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9525                         break;
9526
9527                 /* Erase the target page */
9528                 tw32(NVRAM_ADDR, phy_addr);
9529
9530                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9531                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9532
9533                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9534                         break;
9535
9536                 /* Issue another write enable to start the write. */
9537                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9538
9539                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9540                         break;
9541
9542                 for (j = 0; j < pagesize; j += 4) {
9543                         u32 data;
9544
9545                         data = *((u32 *) (tmp + j));
9546                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9547
9548                         tw32(NVRAM_ADDR, phy_addr + j);
9549
9550                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9551                                 NVRAM_CMD_WR;
9552
9553                         if (j == 0)
9554                                 nvram_cmd |= NVRAM_CMD_FIRST;
9555                         else if (j == (pagesize - 4))
9556                                 nvram_cmd |= NVRAM_CMD_LAST;
9557
9558                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9559                                 break;
9560                 }
9561                 if (ret)
9562                         break;
9563         }
9564
9565         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9566         tg3_nvram_exec_cmd(tp, nvram_cmd);
9567
9568         kfree(tmp);
9569
9570         return ret;
9571 }
9572
/* offset and length are dword aligned */
/* Write to buffered flash or EEPROM: the device buffers a page
 * internally, so each dword can simply be streamed with FIRST/LAST
 * markers at page and transfer boundaries.  Returns 0 on success or a
 * negative errno from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int i, ret = 0;

        for (i = 0; i < len; i += 4, offset += 4) {
                u32 data, page_off, phy_addr, nvram_cmd;

                memcpy(&data, buf + i, 4);
                tw32(NVRAM_WRDATA, cpu_to_be32(data));

                page_off = offset % tp->nvram_pagesize;

                phy_addr = tg3_nvram_phys_addr(tp, offset);

                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

                /* Mark page starts and the very first word as FIRST,
                 * page ends and the very last word as LAST.
                 */
                if ((page_off == 0) || (i == 0))
                        nvram_cmd |= NVRAM_CMD_FIRST;
                if (page_off == (tp->nvram_pagesize - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if (i == (len - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                /* ST parts on older ASICs need an explicit write-enable
                 * before the first word of each page.
                 */
                if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {

                        if ((ret = tg3_nvram_exec_cmd(tp,
                                NVRAM_CMD_WREN | NVRAM_CMD_GO |
                                NVRAM_CMD_DONE)))

                                break;
                }
                if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }

                if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                        break;
        }
        return ret;
}
9623
9624 /* offset and length are dword aligned */
9625 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9626 {
9627         int ret;
9628
9629         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9630                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9631                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9632                 udelay(40);
9633         }
9634
9635         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9636                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9637         }
9638         else {
9639                 u32 grc_mode;
9640
9641                 ret = tg3_nvram_lock(tp);
9642                 if (ret)
9643                         return ret;
9644
9645                 tg3_enable_nvram_access(tp);
9646                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9647                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9648                         tw32(NVRAM_WRITE1, 0x406);
9649
9650                 grc_mode = tr32(GRC_MODE);
9651                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9652
9653                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9654                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9655
9656                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9657                                 buf);
9658                 }
9659                 else {
9660                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9661                                 buf);
9662                 }
9663
9664                 grc_mode = tr32(GRC_MODE);
9665                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9666
9667                 tg3_disable_nvram_access(tp);
9668                 tg3_nvram_unlock(tp);
9669         }
9670
9671         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9672                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9673                 udelay(40);
9674         }
9675
9676         return ret;
9677 }
9678
/* Maps a PCI subsystem (vendor, device) pair to the PHY ID expected on
 * that board; used as a fallback when NVRAM carries no usable PHY info.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
9683
/* Hardcoded board table, looked up by PCI subsystem IDs when the EEPROM
 * signature is missing.  A phy_id of 0 marks a serdes (fiber) board --
 * see tg3_phy_probe().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9721
9722 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9723 {
9724         int i;
9725
9726         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9727                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9728                      tp->pdev->subsystem_vendor) &&
9729                     (subsys_id_to_phy_id[i].subsys_devid ==
9730                      tp->pdev->subsystem_device))
9731                         return &subsys_id_to_phy_id[i];
9732         }
9733         return NULL;
9734 }
9735
/* Read the bootcode-provided configuration out of NIC SRAM and derive
 * the PHY ID, LED mode, write-protect, ASF/WOL and serdes flags from it.
 * __devinit: runs only at device probe.  If the SRAM signature is absent
 * the defaults set below remain in effect.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults used when the SRAM signature is missing */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 is only read on newer chips reporting a sane
		 * bootcode version word.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Repack the two SRAM id halves into the driver's
			 * PHY_ID layout (same packing as tg3_phy_probe()).
			 */
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		/* 5750+ chips carry the LED mode in CFG_2; older chips
		 * carry it in the base config word.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards override to the PHY_2 LED mode */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
		else
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		/* NOTE(review): CFG_2 bits 17/18 below are raw bit tests;
		 * no named constants exist in this file -- verify against
		 * the bootcode documentation.
		 */
		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
9887
/* Identify the PHY attached to this device.  Tries, in order: the live
 * MII ID registers (unless ASF firmware owns the PHY), the ID cached
 * from NVRAM by tg3_get_eeprom_hw_cfg(), and finally the hardcoded
 * subsystem-ID table.  For copper PHYs with no link, also resets the
 * PHY and programs default autonegotiation advertisements.
 * Returns 0 on success or a negative error code.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Repack the two MII ID registers into the driver's
		 * PHY_ID layout (same packing as tg3_get_eeprom_hw_cfg()).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A table phy_id of 0 also denotes a serdes board */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* BMSR link status is latched; the second read reflects
		 * the current link state.  Skip the reset if link is up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revisions advertise as master */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* Only restart autoneg if we are not already advertising
		 * everything we can.
		 */
		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): on a 5401 this runs tg3_init_5401phy_dsp() a
	 * second time (err is 0 here if the block above succeeded).
	 * Looks redundant but is preserved as-is -- confirm intent
	 * before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10010
10011 static void __devinit tg3_read_partno(struct tg3 *tp)
10012 {
10013         unsigned char vpd_data[256];
10014         int i;
10015         u32 magic;
10016
10017         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10018                 goto out_not_found;
10019
10020         if (magic == TG3_EEPROM_MAGIC) {
10021                 for (i = 0; i < 256; i += 4) {
10022                         u32 tmp;
10023
10024                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10025                                 goto out_not_found;
10026
10027                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10028                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10029                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10030                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10031                 }
10032         } else {
10033                 int vpd_cap;
10034
10035                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10036                 for (i = 0; i < 256; i += 4) {
10037                         u32 tmp, j = 0;
10038                         u16 tmp16;
10039
10040                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10041                                               i);
10042                         while (j++ < 100) {
10043                                 pci_read_config_word(tp->pdev, vpd_cap +
10044                                                      PCI_VPD_ADDR, &tmp16);
10045                                 if (tmp16 & 0x8000)
10046                                         break;
10047                                 msleep(1);
10048                         }
10049                         if (!(tmp16 & 0x8000))
10050                                 goto out_not_found;
10051
10052                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10053                                               &tmp);
10054                         tmp = cpu_to_le32(tmp);
10055                         memcpy(&vpd_data[i], &tmp, 4);
10056                 }
10057         }
10058
10059         /* Now parse and find the part number. */
10060         for (i = 0; i < 256; ) {
10061                 unsigned char val = vpd_data[i];
10062                 int block_end;
10063
10064                 if (val == 0x82 || val == 0x91) {
10065                         i = (i + 3 +
10066                              (vpd_data[i + 1] +
10067                               (vpd_data[i + 2] << 8)));
10068                         continue;
10069                 }
10070
10071                 if (val != 0x90)
10072                         goto out_not_found;
10073
10074                 block_end = (i + 3 +
10075                              (vpd_data[i + 1] +
10076                               (vpd_data[i + 2] << 8)));
10077                 i += 3;
10078                 while (i < block_end) {
10079                         if (vpd_data[i + 0] == 'P' &&
10080                             vpd_data[i + 1] == 'N') {
10081                                 int partno_len = vpd_data[i + 2];
10082
10083                                 if (partno_len > 24)
10084                                         goto out_not_found;
10085
10086                                 memcpy(tp->board_part_number,
10087                                        &vpd_data[i + 3],
10088                                        partno_len);
10089
10090                                 /* Success. */
10091                                 return;
10092                         }
10093                 }
10094
10095                 /* Part number not found. */
10096                 goto out_not_found;
10097         }
10098
10099 out_not_found:
10100         strcpy(tp->board_part_number, "none");
10101 }
10102
/* Extract the bootcode firmware version string from the NVRAM
 * directory into tp->fw_ver.  Silently returns, leaving fw_ver
 * untouched, if the EEPROM magic or any directory word cannot be read.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc holds the directory entry, word 0x4 the image start */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	/* NOTE(review): 0x0c in the top byte presumably tags a bootcode
	 * image header -- confirm against the NVRAM layout docs.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		u32 ver_offset, addr;
		int i;

		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
			return;

		if (val != 0)
			return;

		/* Copy 16 bytes of version text, 4 at a time.  Assumes
		 * tp->fw_ver can hold 16 bytes; no NUL terminator is
		 * written here -- TODO confirm buffer size/termination.
		 */
		addr = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (tg3_nvram_read(tp, addr + i, &val))
				return;

			val = cpu_to_le32(val);
			memcpy(tp->fw_ver + i, &val, 4);
		}
	}
}
10142
10143 static int __devinit tg3_get_invariants(struct tg3 *tp)
10144 {
10145         static struct pci_device_id write_reorder_chipsets[] = {
10146                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10147                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10148                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10149                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10150                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10151                              PCI_DEVICE_ID_VIA_8385_0) },
10152                 { },
10153         };
10154         u32 misc_ctrl_reg;
10155         u32 cacheline_sz_reg;
10156         u32 pci_state_reg, grc_misc_cfg;
10157         u32 val;
10158         u16 pci_cmd;
10159         int err;
10160
10161         /* Force memory write invalidate off.  If we leave it on,
10162          * then on 5700_BX chips we have to enable a workaround.
10163          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10164          * to match the cacheline size.  The Broadcom driver have this
10165          * workaround but turns MWI off all the times so never uses
10166          * it.  This seems to suggest that the workaround is insufficient.
10167          */
10168         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10169         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10170         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10171
10172         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10173          * has the register indirect write enable bit set before
10174          * we try to access any of the MMIO registers.  It is also
10175          * critical that the PCI-X hw workaround situation is decided
10176          * before that as well.
10177          */
10178         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10179                               &misc_ctrl_reg);
10180
10181         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10182                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10183
10184         /* Wrong chip ID in 5752 A0. This code can be removed later
10185          * as A0 is not in production.
10186          */
10187         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10188                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10189
10190         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10191          * we need to disable memory and use config. cycles
10192          * only to access all registers. The 5702/03 chips
10193          * can mistakenly decode the special cycles from the
10194          * ICH chipsets as memory write cycles, causing corruption
10195          * of register and memory space. Only certain ICH bridges
10196          * will drive special cycles with non-zero data during the
10197          * address phase which can fall within the 5703's address
10198          * range. This is not an ICH bug as the PCI spec allows
10199          * non-zero address during special cycles. However, only
10200          * these ICH bridges are known to drive non-zero addresses
10201          * during special cycles.
10202          *
10203          * Since special cycles do not cross PCI bridges, we only
10204          * enable this workaround if the 5703 is on the secondary
10205          * bus of these ICH bridges.
10206          */
10207         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10208             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10209                 static struct tg3_dev_id {
10210                         u32     vendor;
10211                         u32     device;
10212                         u32     rev;
10213                 } ich_chipsets[] = {
10214                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10215                           PCI_ANY_ID },
10216                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10217                           PCI_ANY_ID },
10218                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10219                           0xa },
10220                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10221                           PCI_ANY_ID },
10222                         { },
10223                 };
10224                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10225                 struct pci_dev *bridge = NULL;
10226
10227                 while (pci_id->vendor != 0) {
10228                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10229                                                 bridge);
10230                         if (!bridge) {
10231                                 pci_id++;
10232                                 continue;
10233                         }
10234                         if (pci_id->rev != PCI_ANY_ID) {
10235                                 u8 rev;
10236
10237                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10238                                                      &rev);
10239                                 if (rev > pci_id->rev)
10240                                         continue;
10241                         }
10242                         if (bridge->subordinate &&
10243                             (bridge->subordinate->number ==
10244                              tp->pdev->bus->number)) {
10245
10246                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10247                                 pci_dev_put(bridge);
10248                                 break;
10249                         }
10250                 }
10251         }
10252
10253         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10254          * DMA addresses > 40-bit. This bridge may have other additional
10255          * 57xx devices behind it in some 4-port NIC designs for example.
10256          * Any tg3 device found behind the bridge will also need the 40-bit
10257          * DMA workaround.
10258          */
10259         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10260             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10261                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10262                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10263                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10264         }
10265         else {
10266                 struct pci_dev *bridge = NULL;
10267
10268                 do {
10269                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10270                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10271                                                 bridge);
10272                         if (bridge && bridge->subordinate &&
10273                             (bridge->subordinate->number <=
10274                              tp->pdev->bus->number) &&
10275                             (bridge->subordinate->subordinate >=
10276                              tp->pdev->bus->number)) {
10277                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10278                                 pci_dev_put(bridge);
10279                                 break;
10280                         }
10281                 } while (bridge);
10282         }
10283
10284         /* Initialize misc host control in PCI block. */
10285         tp->misc_host_ctrl |= (misc_ctrl_reg &
10286                                MISC_HOST_CTRL_CHIPREV);
10287         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10288                                tp->misc_host_ctrl);
10289
10290         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10291                               &cacheline_sz_reg);
10292
10293         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10294         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10295         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10296         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10297
10298         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10299             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10300             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10301             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10302             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10303                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10304
10305         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10306             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10307                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10308
10309         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10310                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10311                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10312                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10313                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10314                 } else {
10315                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10316                                           TG3_FLG2_HW_TSO_1_BUG;
10317                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10318                                 ASIC_REV_5750 &&
10319                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10320                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10321                 }
10322         }
10323
10324         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10325             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10326             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10327             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10328             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10329                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10330
10331         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10332                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10333
10334         /* If we have an AMD 762 or VIA K8T800 chipset, write
10335          * reordering to the mailbox registers done by the host
10336          * controller can cause major troubles.  We read back from
10337          * every mailbox register write to force the writes to be
10338          * posted to the chip in order.
10339          */
10340         if (pci_dev_present(write_reorder_chipsets) &&
10341             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10342                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10343
10344         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10345             tp->pci_lat_timer < 64) {
10346                 tp->pci_lat_timer = 64;
10347
10348                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10349                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10350                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10351                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10352
10353                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10354                                        cacheline_sz_reg);
10355         }
10356
10357         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10358                               &pci_state_reg);
10359
10360         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10361                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10362
10363                 /* If this is a 5700 BX chipset, and we are in PCI-X
10364                  * mode, enable register write workaround.
10365                  *
10366                  * The workaround is to use indirect register accesses
10367                  * for all chip writes not to mailbox registers.
10368                  */
10369                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10370                         u32 pm_reg;
10371                         u16 pci_cmd;
10372
10373                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10374
10375                         /* The chip can have it's power management PCI config
10376                          * space registers clobbered due to this bug.
10377                          * So explicitly force the chip into D0 here.
10378                          */
10379                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10380                                               &pm_reg);
10381                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10382                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10383                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10384                                                pm_reg);
10385
10386                         /* Also, force SERR#/PERR# in PCI command. */
10387                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10388                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10389                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10390                 }
10391         }
10392
10393         /* 5700 BX chips need to have their TX producer index mailboxes
10394          * written twice to workaround a bug.
10395          */
10396         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10397                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10398
10399         /* Back to back register writes can cause problems on this chip,
10400          * the workaround is to read back all reg writes except those to
10401          * mailbox regs.  See tg3_write_indirect_reg32().
10402          *
10403          * PCI Express 5750_A0 rev chips need this workaround too.
10404          */
10405         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10406             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10407              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10408                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10409
10410         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10411                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10412         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10413                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10414
10415         /* Chip-specific fixup from Broadcom driver */
10416         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10417             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10418                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10419                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10420         }
10421
10422         /* Default fast path register access methods */
10423         tp->read32 = tg3_read32;
10424         tp->write32 = tg3_write32;
10425         tp->read32_mbox = tg3_read32;
10426         tp->write32_mbox = tg3_write32;
10427         tp->write32_tx_mbox = tg3_write32;
10428         tp->write32_rx_mbox = tg3_write32;
10429
10430         /* Various workaround register access methods */
10431         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10432                 tp->write32 = tg3_write_indirect_reg32;
10433         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10434                 tp->write32 = tg3_write_flush_reg32;
10435
10436         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10437             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10438                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10439                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10440                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10441         }
10442
10443         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10444                 tp->read32 = tg3_read_indirect_reg32;
10445                 tp->write32 = tg3_write_indirect_reg32;
10446                 tp->read32_mbox = tg3_read_indirect_mbox;
10447                 tp->write32_mbox = tg3_write_indirect_mbox;
10448                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10449                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10450
10451                 iounmap(tp->regs);
10452                 tp->regs = NULL;
10453
10454                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10455                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10456                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10457         }
10458
10459         if (tp->write32 == tg3_write_indirect_reg32 ||
10460             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10461              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10462               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10463                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10464
10465         /* Get eeprom hw config before calling tg3_set_power_state().
10466          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10467          * determined before calling tg3_set_power_state() so that
10468          * we know whether or not to switch out of Vaux power.
10469          * When the flag is set, it means that GPIO1 is used for eeprom
10470          * write protect and also implies that it is a LOM where GPIOs
10471          * are not used to switch power.
10472          */
10473         tg3_get_eeprom_hw_cfg(tp);
10474
10475         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10476          * GPIO1 driven high will bring 5700's external PHY out of reset.
10477          * It is also used as eeprom write protect on LOMs.
10478          */
10479         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10480         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10481             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10482                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10483                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10484         /* Unused GPIO3 must be driven as output on 5752 because there
10485          * are no pull-up resistors on unused GPIO pins.
10486          */
10487         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10488                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10489
10490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10491                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10492
10493         /* Force the chip into D0. */
10494         err = tg3_set_power_state(tp, PCI_D0);
10495         if (err) {
10496                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10497                        pci_name(tp->pdev));
10498                 return err;
10499         }
10500
10501         /* 5700 B0 chips do not support checksumming correctly due
10502          * to hardware bugs.
10503          */
10504         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10505                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10506
10507         /* Derive initial jumbo mode from MTU assigned in
10508          * ether_setup() via the alloc_etherdev() call
10509          */
10510         if (tp->dev->mtu > ETH_DATA_LEN &&
10511             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10512                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10513
10514         /* Determine WakeOnLan speed to use. */
10515         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10516             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10517             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10518             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10519                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10520         } else {
10521                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10522         }
10523
10524         /* A few boards don't want Ethernet@WireSpeed phy feature */
10525         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10526             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10527              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10528              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10529             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10530                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10531
10532         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10533             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10534                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10535         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10536                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10537
10538         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10539                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10540                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10541                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10542                 else
10543                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10544         }
10545
10546         tp->coalesce_mode = 0;
10547         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10548             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10549                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10550
10551         /* Initialize MAC MI mode, polling disabled. */
10552         tw32_f(MAC_MI_MODE, tp->mi_mode);
10553         udelay(80);
10554
10555         /* Initialize data/descriptor byte/word swapping. */
10556         val = tr32(GRC_MODE);
10557         val &= GRC_MODE_HOST_STACKUP;
10558         tw32(GRC_MODE, val | tp->grc_mode);
10559
10560         tg3_switch_clocks(tp);
10561
10562         /* Clear this out for sanity. */
10563         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10564
10565         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10566                               &pci_state_reg);
10567         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10568             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10569                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10570
10571                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10572                     chiprevid == CHIPREV_ID_5701_B0 ||
10573                     chiprevid == CHIPREV_ID_5701_B2 ||
10574                     chiprevid == CHIPREV_ID_5701_B5) {
10575                         void __iomem *sram_base;
10576
10577                         /* Write some dummy words into the SRAM status block
10578                          * area, see if it reads back correctly.  If the return
10579                          * value is bad, force enable the PCIX workaround.
10580                          */
10581                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10582
10583                         writel(0x00000000, sram_base);
10584                         writel(0x00000000, sram_base + 4);
10585                         writel(0xffffffff, sram_base + 4);
10586                         if (readl(sram_base) != 0x00000000)
10587                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10588                 }
10589         }
10590
10591         udelay(50);
10592         tg3_nvram_init(tp);
10593
10594         grc_misc_cfg = tr32(GRC_MISC_CFG);
10595         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10596
10597         /* Broadcom's driver says that CIOBE multisplit has a bug */
10598 #if 0
10599         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10600             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10601                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10602                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10603         }
10604 #endif
10605         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10606             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10607              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10608                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10609
10610         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10611             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10612                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10613         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10614                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10615                                       HOSTCC_MODE_CLRTICK_TXBD);
10616
10617                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10618                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10619                                        tp->misc_host_ctrl);
10620         }
10621
10622         /* these are limited to 10/100 only */
10623         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10624              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10625             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10626              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10627              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10628               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10629               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10630             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10631              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10632               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10633                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10634
10635         err = tg3_phy_probe(tp);
10636         if (err) {
10637                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10638                        pci_name(tp->pdev), err);
10639                 /* ... but do not return immediately ... */
10640         }
10641
10642         tg3_read_partno(tp);
10643         tg3_read_fw_ver(tp);
10644
10645         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10646                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10647         } else {
10648                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10649                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10650                 else
10651                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10652         }
10653
10654         /* 5700 {AX,BX} chips have a broken status block link
10655          * change bit implementation, so we must use the
10656          * status register in those cases.
10657          */
10658         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10659                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10660         else
10661                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10662
10663         /* The led_ctrl is set during tg3_phy_probe, here we might
10664          * have to force the link status polling mechanism based
10665          * upon subsystem IDs.
10666          */
10667         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10668             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10669                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10670                                   TG3_FLAG_USE_LINKCHG_REG);
10671         }
10672
10673         /* For all SERDES we poll the MAC status register. */
10674         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10675                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10676         else
10677                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10678
10679         /* All chips before 5787 can get confused if TX buffers
10680          * straddle the 4GB address boundary in some cases.
10681          */
10682         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10683             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10684                 tp->dev->hard_start_xmit = tg3_start_xmit;
10685         else
10686                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10687
10688         tp->rx_offset = 2;
10689         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10690             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10691                 tp->rx_offset = 0;
10692
10693         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10694
10695         /* Increment the rx prod index on the rx std ring by at most
10696          * 8 for these chips to workaround hw errata.
10697          */
10698         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10699             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10700             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10701                 tp->rx_std_max_post = 8;
10702
10703         /* By default, disable wake-on-lan.  User can change this
10704          * using ETHTOOL_SWOL.
10705          */
10706         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10707
10708         return err;
10709 }
10710
10711 #ifdef CONFIG_SPARC64
10712 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10713 {
10714         struct net_device *dev = tp->dev;
10715         struct pci_dev *pdev = tp->pdev;
10716         struct pcidev_cookie *pcp = pdev->sysdata;
10717
10718         if (pcp != NULL) {
10719                 unsigned char *addr;
10720                 int len;
10721
10722                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10723                                         &len);
10724                 if (addr && len == 6) {
10725                         memcpy(dev->dev_addr, addr, 6);
10726                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10727                         return 0;
10728                 }
10729         }
10730         return -ENODEV;
10731 }
10732
10733 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10734 {
10735         struct net_device *dev = tp->dev;
10736
10737         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10738         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10739         return 0;
10740 }
10741 #endif
10742
10743 static int __devinit tg3_get_device_address(struct tg3 *tp)
10744 {
10745         struct net_device *dev = tp->dev;
10746         u32 hi, lo, mac_offset;
10747         int addr_ok = 0;
10748
10749 #ifdef CONFIG_SPARC64
10750         if (!tg3_get_macaddr_sparc(tp))
10751                 return 0;
10752 #endif
10753
10754         mac_offset = 0x7c;
10755         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10756             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10757                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10758                         mac_offset = 0xcc;
10759                 if (tg3_nvram_lock(tp))
10760                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10761                 else
10762                         tg3_nvram_unlock(tp);
10763         }
10764
10765         /* First try to get it from MAC address mailbox. */
10766         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10767         if ((hi >> 16) == 0x484b) {
10768                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10769                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10770
10771                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10772                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10773                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10774                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10775                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10776
10777                 /* Some old bootcode may report a 0 MAC address in SRAM */
10778                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10779         }
10780         if (!addr_ok) {
10781                 /* Next, try NVRAM. */
10782                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10783                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10784                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10785                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10786                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10787                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10788                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10789                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10790                 }
10791                 /* Finally just fetch it out of the MAC control regs. */
10792                 else {
10793                         hi = tr32(MAC_ADDR_0_HIGH);
10794                         lo = tr32(MAC_ADDR_0_LOW);
10795
10796                         dev->dev_addr[5] = lo & 0xff;
10797                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10798                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10799                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10800                         dev->dev_addr[1] = hi & 0xff;
10801                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10802                 }
10803         }
10804
10805         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10806 #ifdef CONFIG_SPARC64
10807                 if (!tg3_get_default_macaddr_sparc(tp))
10808                         return 0;
10809 #endif
10810                 return -EINVAL;
10811         }
10812         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10813         return 0;
10814 }
10815
10816 #define BOUNDARY_SINGLE_CACHELINE       1
10817 #define BOUNDARY_MULTI_CACHELINE        2
10818
10819 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10820 {
10821         int cacheline_size;
10822         u8 byte;
10823         int goal;
10824
10825         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10826         if (byte == 0)
10827                 cacheline_size = 1024;
10828         else
10829                 cacheline_size = (int) byte * 4;
10830
10831         /* On 5703 and later chips, the boundary bits have no
10832          * effect.
10833          */
10834         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10835             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10836             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10837                 goto out;
10838
10839 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10840         goal = BOUNDARY_MULTI_CACHELINE;
10841 #else
10842 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10843         goal = BOUNDARY_SINGLE_CACHELINE;
10844 #else
10845         goal = 0;
10846 #endif
10847 #endif
10848
10849         if (!goal)
10850                 goto out;
10851
10852         /* PCI controllers on most RISC systems tend to disconnect
10853          * when a device tries to burst across a cache-line boundary.
10854          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10855          *
10856          * Unfortunately, for PCI-E there are only limited
10857          * write-side controls for this, and thus for reads
10858          * we will still get the disconnects.  We'll also waste
10859          * these PCI cycles for both read and write for chips
10860          * other than 5700 and 5701 which do not implement the
10861          * boundary bits.
10862          */
10863         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10864             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10865                 switch (cacheline_size) {
10866                 case 16:
10867                 case 32:
10868                 case 64:
10869                 case 128:
10870                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10871                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10872                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10873                         } else {
10874                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10875                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10876                         }
10877                         break;
10878
10879                 case 256:
10880                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10881                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10882                         break;
10883
10884                 default:
10885                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10886                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10887                         break;
10888                 };
10889         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10890                 switch (cacheline_size) {
10891                 case 16:
10892                 case 32:
10893                 case 64:
10894                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10895                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10896                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10897                                 break;
10898                         }
10899                         /* fallthrough */
10900                 case 128:
10901                 default:
10902                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10903                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10904                         break;
10905                 };
10906         } else {
10907                 switch (cacheline_size) {
10908                 case 16:
10909                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10910                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10911                                         DMA_RWCTRL_WRITE_BNDRY_16);
10912                                 break;
10913                         }
10914                         /* fallthrough */
10915                 case 32:
10916                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10917                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10918                                         DMA_RWCTRL_WRITE_BNDRY_32);
10919                                 break;
10920                         }
10921                         /* fallthrough */
10922                 case 64:
10923                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10924                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10925                                         DMA_RWCTRL_WRITE_BNDRY_64);
10926                                 break;
10927                         }
10928                         /* fallthrough */
10929                 case 128:
10930                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10931                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10932                                         DMA_RWCTRL_WRITE_BNDRY_128);
10933                                 break;
10934                         }
10935                         /* fallthrough */
10936                 case 256:
10937                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10938                                 DMA_RWCTRL_WRITE_BNDRY_256);
10939                         break;
10940                 case 512:
10941                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10942                                 DMA_RWCTRL_WRITE_BNDRY_512);
10943                         break;
10944                 case 1024:
10945                 default:
10946                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10947                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10948                         break;
10949                 };
10950         }
10951
10952 out:
10953         return val;
10954 }
10955
/* Exercise the on-chip DMA engine by pushing one internal buffer
 * descriptor through the read (host->NIC) or write (NIC->host) DMA
 * queue and polling the matching completion FIFO for the descriptor
 * address.
 *
 * @tp:        device instance
 * @buf:       host-side test buffer (kernel virtual address)
 * @buf_dma:   bus/DMA address of @buf
 * @size:      number of bytes to transfer
 * @to_device: non-zero for a host-to-NIC (read DMA) transfer,
 *             zero for a NIC-to-host (write DMA) transfer
 *
 * Returns 0 when the completion FIFO reports the descriptor within the
 * 40 * 100us polling window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Clear the completion FIFOs and DMA engine status before the
         * test so stale state cannot satisfy the poll below.
         */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Build the internal buffer descriptor: host buffer address, a
         * fixed NIC-side mbuf location (0x2100), and transfer length.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* cqid/sqid select the read-DMA completion/submission
                 * queues; enable the read DMA engine.
                 */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                /* Same for the write DMA direction. */
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor into NIC SRAM one 32-bit word at a time
         * via the indirect PCI memory window.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick off the transfer by enqueueing the descriptor address
         * on the appropriate DMA FIFO.
         */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll the completion FIFO for up to 40 * 100us for the
         * descriptor address to come back.
         */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
11036
11037 #define TEST_BUFFER_SIZE        0x2000
11038
/* Tune TG3PCI_DMA_RW_CTRL for the bus type (PCIe/PCI/PCI-X) and chip
 * revision, then -- on 5700/5701 only -- run a host->NIC->host DMA
 * loopback over an 8KB test buffer to detect the 5700/5701 write-DMA
 * corruption bug, retrying with a forced 16-byte write boundary when
 * corruption is observed.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if the DMA engine fails or still corrupts
 * data at the safest boundary setting.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret;

        buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Baseline PCI read/write command watermarks. */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        /* Fold in the cacheline-dependent read/write boundary bits. */
        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
                /* Plain PCI: watermarks differ for 5705/5750 class. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                /* PCI-X mode: per-chip watermarks and workarounds. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |= 0x009f0000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        /* 5703/5704 do not use the low nibble of the register. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* Only 5700/5701 exhibit the write-DMA bug; everything else is
         * done after programming the register above.
         */
        ret = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        /* Loop: write buffer to NIC, read it back, verify; on
         * corruption, fall back to a 16-byte write boundary once and
         * retry, otherwise fail.
         */
        while (1) {
                u32 *p = buf, i;

                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        /* Corruption: retry once at the safest (16-byte)
                         * write boundary; if already there, give up.
                         */
                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                static struct pci_device_id dma_wait_state_chipsets[] = {
                        { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
                                     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
                        { },
                };

                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                }
                else
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
11219
11220 static void __devinit tg3_init_link_config(struct tg3 *tp)
11221 {
11222         tp->link_config.advertising =
11223                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11224                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11225                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11226                  ADVERTISED_Autoneg | ADVERTISED_MII);
11227         tp->link_config.speed = SPEED_INVALID;
11228         tp->link_config.duplex = DUPLEX_INVALID;
11229         tp->link_config.autoneg = AUTONEG_ENABLE;
11230         tp->link_config.active_speed = SPEED_INVALID;
11231         tp->link_config.active_duplex = DUPLEX_INVALID;
11232         tp->link_config.phy_is_low_power = 0;
11233         tp->link_config.orig_speed = SPEED_INVALID;
11234         tp->link_config.orig_duplex = DUPLEX_INVALID;
11235         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11236 }
11237
11238 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11239 {
11240         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11241                 tp->bufmgr_config.mbuf_read_dma_low_water =
11242                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11243                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11244                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11245                 tp->bufmgr_config.mbuf_high_water =
11246                         DEFAULT_MB_HIGH_WATER_5705;
11247
11248                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11249                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11250                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11251                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11252                 tp->bufmgr_config.mbuf_high_water_jumbo =
11253                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11254         } else {
11255                 tp->bufmgr_config.mbuf_read_dma_low_water =
11256                         DEFAULT_MB_RDMA_LOW_WATER;
11257                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11258                         DEFAULT_MB_MACRX_LOW_WATER;
11259                 tp->bufmgr_config.mbuf_high_water =
11260                         DEFAULT_MB_HIGH_WATER;
11261
11262                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11263                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11264                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11265                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11266                 tp->bufmgr_config.mbuf_high_water_jumbo =
11267                         DEFAULT_MB_HIGH_WATER_JUMBO;
11268         }
11269
11270         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11271         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11272 }
11273
11274 static char * __devinit tg3_phy_string(struct tg3 *tp)
11275 {
11276         switch (tp->phy_id & PHY_ID_MASK) {
11277         case PHY_ID_BCM5400:    return "5400";
11278         case PHY_ID_BCM5401:    return "5401";
11279         case PHY_ID_BCM5411:    return "5411";
11280         case PHY_ID_BCM5701:    return "5701";
11281         case PHY_ID_BCM5703:    return "5703";
11282         case PHY_ID_BCM5704:    return "5704";
11283         case PHY_ID_BCM5705:    return "5705";
11284         case PHY_ID_BCM5750:    return "5750";
11285         case PHY_ID_BCM5752:    return "5752";
11286         case PHY_ID_BCM5714:    return "5714";
11287         case PHY_ID_BCM5780:    return "5780";
11288         case PHY_ID_BCM5755:    return "5755";
11289         case PHY_ID_BCM5787:    return "5787";
11290         case PHY_ID_BCM5756:    return "5722/5756";
11291         case PHY_ID_BCM8002:    return "8002/serdes";
11292         case 0:                 return "serdes";
11293         default:                return "unknown";
11294         };
11295 }
11296
11297 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11298 {
11299         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11300                 strcpy(str, "PCI Express");
11301                 return str;
11302         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11303                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11304
11305                 strcpy(str, "PCIX:");
11306
11307                 if ((clock_ctrl == 7) ||
11308                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11309                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11310                         strcat(str, "133MHz");
11311                 else if (clock_ctrl == 0)
11312                         strcat(str, "33MHz");
11313                 else if (clock_ctrl == 2)
11314                         strcat(str, "50MHz");
11315                 else if (clock_ctrl == 4)
11316                         strcat(str, "66MHz");
11317                 else if (clock_ctrl == 6)
11318                         strcat(str, "100MHz");
11319         } else {
11320                 strcpy(str, "PCI:");
11321                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11322                         strcat(str, "66MHz");
11323                 else
11324                         strcat(str, "33MHz");
11325         }
11326         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11327                 strcat(str, ":32-bit");
11328         else
11329                 strcat(str, ":64-bit");
11330         return str;
11331 }
11332
/* Locate the sibling PCI function of a dual-port chip (5704/5714):
 * scan all eight functions of this device's slot and return the first
 * function that is not tp->pdev itself.  When the chip is configured
 * single-port and no sibling exists, tp->pdev is returned instead.
 * The returned device's refcount is NOT held (see comment below).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;

        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, devnr | func);
                if (peer && peer != tp->pdev)
                        break;
                /* pci_dev_put(NULL) is a no-op, so an empty slot is
                 * handled without a separate check.
                 */
                pci_dev_put(peer);
        }
        /* 5704 can be configured in single-port mode, set peer to
         * tp->pdev in that case.
         */
        if (!peer) {
                peer = tp->pdev;
                return peer;
        }

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other
         */
        /* NOTE(review): if the loop exhausts with the final function
         * being tp->pdev itself, peer was already put inside the loop
         * and is put again here -- verify the refcount balance for
         * that edge case.
         */
        pci_dev_put(peer);

        return peer;
}
11360
11361 static void __devinit tg3_init_coal(struct tg3 *tp)
11362 {
11363         struct ethtool_coalesce *ec = &tp->coal;
11364
11365         memset(ec, 0, sizeof(*ec));
11366         ec->cmd = ETHTOOL_GCOALESCE;
11367         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11368         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11369         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11370         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11371         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11372         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11373         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11374         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11375         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11376
11377         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11378                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11379                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11380                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11381                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11382                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11383         }
11384
11385         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11386                 ec->rx_coalesce_usecs_irq = 0;
11387                 ec->tx_coalesce_usecs_irq = 0;
11388                 ec->stats_block_coalesce_usecs = 0;
11389         }
11390 }
11391
/* PCI probe entry point: enable and map the device, allocate the
 * net_device, discover chip capabilities, configure DMA masks, fetch
 * the MAC address, run the DMA engine self-test, and register the
 * network interface.  Returns 0 on success or a negative errno,
 * unwinding all partial initialization on failure via the goto chain
 * at the bottom.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        static int tg3_version_printed = 0;
        unsigned long tg3reg_base, tg3reg_len;
        struct net_device *dev;
        struct tg3 *tp;
        int i, err, pm_cap;
        char str[40];
        u64 dma_mask, persist_dma_mask;

        /* Print the driver banner only for the first probed device. */
        if (tg3_version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        err = pci_enable_device(pdev);
        if (err) {
                printk(KERN_ERR PFX "Cannot enable PCI device, "
                       "aborting.\n");
                return err;
        }

        /* The chip's registers live in BAR 0, which must be MMIO. */
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "Cannot find proper PCI device "
                       "base address, aborting.\n");
                err = -ENODEV;
                goto err_out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                printk(KERN_ERR PFX "Cannot obtain PCI resources, "
                       "aborting.\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        /* Find power-management capability. */
        pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (pm_cap == 0) {
                printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
                       "aborting.\n");
                err = -EIO;
                goto err_out_free_res;
        }

        tg3reg_base = pci_resource_start(pdev, 0);
        tg3reg_len = pci_resource_len(pdev, 0);

        dev = alloc_etherdev(sizeof(*tp));
        if (!dev) {
                printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out_free_res;
        }

        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        dev->vlan_rx_register = tg3_vlan_rx_register;
        dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

        /* Initialize the private state hanging off the net_device. */
        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->pm_cap = pm_cap;
        tp->mac_mode = TG3_DEF_MAC_MODE;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;
        tp->mi_mode = MAC_MI_MODE_BASE;
        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;

        /* The word/byte swap controls here control register access byte
         * swapping.  DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

        tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
        if (tp->regs == 0UL) {
                printk(KERN_ERR PFX "Cannot map device registers, "
                       "aborting.\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        tg3_init_link_config(tp);

        /* Default ring sizes before ethtool tuning. */
        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
        tp->tx_pending = TG3_DEF_TX_RING_PENDING;

        /* Wire up the net_device operations. */
        dev->open = tg3_open;
        dev->stop = tg3_close;
        dev->get_stats = tg3_get_stats;
        dev->set_multicast_list = tg3_set_rx_mode;
        dev->set_mac_address = tg3_set_mac_addr;
        dev->do_ioctl = tg3_ioctl;
        dev->tx_timeout = tg3_tx_timeout;
        dev->poll = tg3_poll;
        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->weight = 64;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->change_mtu = tg3_change_mtu;
        dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = tg3_poll_controller;
#endif

        /* Read chip revision, flags, and workaround bits. */
        err = tg3_get_invariants(tp);
        if (err) {
                printk(KERN_ERR PFX "Problem fetching invariants of chip, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in tg3_start_xmit().
         */
        if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
                persist_dma_mask = dma_mask = DMA_32BIT_MASK;
        else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
                persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
                dma_mask = DMA_64BIT_MASK;
#endif
        } else
                persist_dma_mask = dma_mask = DMA_64BIT_MASK;

        /* Configure DMA attributes. */
        if (dma_mask > DMA_32BIT_MASK) {
                err = pci_set_dma_mask(pdev, dma_mask);
                if (!err) {
                        dev->features |= NETIF_F_HIGHDMA;
                        err = pci_set_consistent_dma_mask(pdev,
                                                          persist_dma_mask);
                        if (err < 0) {
                                printk(KERN_ERR PFX "Unable to obtain 64 bit "
                                       "DMA for consistent allocations\n");
                                goto err_out_iounmap;
                        }
                }
        }
        /* Fall back to 32-bit addressing when the wide mask failed. */
        if (err || dma_mask == DMA_32BIT_MASK) {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        printk(KERN_ERR PFX "No usable DMA configuration, "
                               "aborting.\n");
                        goto err_out_iounmap;
                }
        }

        tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
        /* Decide TSO capability: HW TSO chips always capable; old
         * 5700/5701/5705-A0 chips and ASF-enabled boards are not.
         */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
        }
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
        } else {
                tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
        }

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                dev->features |= NETIF_F_TSO;
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
                        dev->features |= NETIF_F_TSO6;
        }

#endif

        /* 5705-A1 on a slow bus needs a smaller RX ring. */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
            !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
                tp->rx_pending = 63;
        }

        /* Dual-port chips need a handle to their sibling function. */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
                tp->pdev_peer = tg3_find_peer(tp);

        err = tg3_get_device_address(tp);
        if (err) {
                printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        /*
         * Reset chip in case UNDI or EFI driver did not shutdown
         * DMA self test will enable WDMAC and we'll see (spurious)
         * pending DMA on the PCI bus at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                pci_save_state(tp->pdev);
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }

        err = tg3_test_dma(tp);
        if (err) {
                printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
                goto err_out_iounmap;
        }

        /* Tigon3 can do ipv4 only... and some chips have buggy
         * checksumming.
         */
        if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
                        dev->features |= NETIF_F_HW_CSUM;
                else
                        dev->features |= NETIF_F_IP_CSUM;
                dev->features |= NETIF_F_SG;
                tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
        } else
                tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

        /* flow control autonegotiation is default behavior */
        tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

        tg3_init_coal(tp);

        /* Now that we have fully setup the chip, save away a snapshot
         * of the PCI config space.  We need to restore this after
         * GRC_MISC_CFG core clock resets and some resume events.
         */
        pci_save_state(tp->pdev);

        err = register_netdev(dev);
        if (err) {
                printk(KERN_ERR PFX "Cannot register net device, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        pci_set_drvdata(pdev, dev);

        /* Probe banner: part number, revision, PHY, bus, and MAC. */
        printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
               dev->name,
               tp->board_part_number,
               tp->pci_chip_rev_id,
               tg3_phy_string(tp),
               tg3_bus_string(tp, str),
               (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i],
                       i == 5 ? '\n' : ':');

        printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
               "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
               "TSOcap[%d] \n",
               dev->name,
               (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
               (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
               (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
               (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
               (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
        printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
               dev->name, tp->dma_rwctrl,
               (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
                (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

        /* No carrier until the first link interrupt says otherwise. */
        netif_carrier_off(tp->dev);

        return 0;

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}
11718
11719 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11720 {
11721         struct net_device *dev = pci_get_drvdata(pdev);
11722
11723         if (dev) {
11724                 struct tg3 *tp = netdev_priv(dev);
11725
11726                 flush_scheduled_work();
11727                 unregister_netdev(dev);
11728                 if (tp->regs) {
11729                         iounmap(tp->regs);
11730                         tp->regs = NULL;
11731                 }
11732                 free_netdev(dev);
11733                 pci_release_regions(pdev);
11734                 pci_disable_device(pdev);
11735                 pci_set_drvdata(pdev, NULL);
11736         }
11737 }
11738
/* PCI suspend hook: quiesce the interface, halt the chip, and move it
 * into the low-power state chosen by the PCI core.  If entering low
 * power fails, the hardware is restarted so the system is left with a
 * working interface; the error is still returned to the caller.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* Interface is down -- nothing to quiesce. */
        if (!netif_running(dev))
                return 0;

        /* Drain any pending deferred work before stopping the NIC. */
        flush_scheduled_work();
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        /* NOTE(review): second argument appears to be an irq_sync flag
         * (1 here, 0 elsewhere) -- confirm against tg3_full_lock().
         */
        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
        tg3_full_unlock(tp);

        err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
        if (err) {
                /* Could not enter the target power state: undo the
                 * shutdown and bring the device back to life.
                 */
                tg3_full_lock(tp, 0);

                tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                if (tg3_restart_hw(tp, 1))
                        goto out;

                /* Re-arm the periodic timer stopped above. */
                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);
        }

        return err;
}
11784
/* PCI resume hook: restore config space, power the chip back to D0,
 * and restart the hardware/timer/queues stopped by tg3_suspend().
 * Returns 0 on success or a negative error from the power/restart path.
 */
static int tg3_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* Interface was down at suspend time -- nothing to restore. */
        if (!netif_running(dev))
                return 0;

        /* Restore the PCI config space snapshot saved at probe time. */
        pci_restore_state(tp->pdev);

        err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                return err;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        err = tg3_restart_hw(tp, 1);
        if (err)
                goto out;

        /* Re-arm the periodic driver timer stopped during suspend. */
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        return err;
}
11819
/* PCI driver glue: binds the probe/remove/suspend/resume entry points
 * above to the device IDs listed in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .suspend        = tg3_suspend,
        .resume         = tg3_resume
};
11828
11829 static int __init tg3_init(void)
11830 {
11831         return pci_register_driver(&tg3_driver);
11832 }
11833
/* Module exit point: detach from all bound devices (the PCI core calls
 * tg3_remove_one for each) and unregister the driver.
 */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}
11838
/* Wire module load/unload to the init/cleanup routines above. */
module_init(tg3_init);
module_exit(tg3_cleanup);