1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.72"
68 #define DRV_MODULE_RELDATE      "January 8, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
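/* Illustrative sketch, not part of the original driver: because the ring
 * sizes above are powers of two, an index can be advanced with the mask
 * form used by NEXT_TX() instead of a hardware modulo.  The helper name
 * below is hypothetical.
 */
static inline u32 tg3_tx_ring_advance_example(u32 index)
{
        /* Equivalent to (index + 1) % TG3_TX_RING_SIZE for a
         * power-of-two ring size, but compiles to a single AND.
         */
        return (index + 1) & (TG3_TX_RING_SIZE - 1);
}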
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
135 static char version[] __devinitdata =
136         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
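/* Illustrative sketch, not part of the original driver: the tg3_debug
 * bitmap is assumed to end up in tp->msg_enable during probe, where the
 * standard netif_msg_*() helpers gate log output, e.g. the link messages
 * printed by tg3_link_report().  The helper name is hypothetical.
 */
static inline void tg3_debug_example(struct tg3 *tp)
{
        if (netif_msg_link(tp))         /* NETIF_MSG_LINK set in the bitmap */
                printk(KERN_INFO PFX "%s: link messages are enabled\n",
                       tp->dev->name);
}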
146
147 static struct pci_device_id tg3_pci_tbl[] = {
148         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
202         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
203         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
204         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
205         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
206         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
207         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
208         {}
209 };
210
211 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
212
213 static const struct {
214         const char string[ETH_GSTRING_LEN];
215 } ethtool_stats_keys[TG3_NUM_STATS] = {
216         { "rx_octets" },
217         { "rx_fragments" },
218         { "rx_ucast_packets" },
219         { "rx_mcast_packets" },
220         { "rx_bcast_packets" },
221         { "rx_fcs_errors" },
222         { "rx_align_errors" },
223         { "rx_xon_pause_rcvd" },
224         { "rx_xoff_pause_rcvd" },
225         { "rx_mac_ctrl_rcvd" },
226         { "rx_xoff_entered" },
227         { "rx_frame_too_long_errors" },
228         { "rx_jabbers" },
229         { "rx_undersize_packets" },
230         { "rx_in_length_errors" },
231         { "rx_out_length_errors" },
232         { "rx_64_or_less_octet_packets" },
233         { "rx_65_to_127_octet_packets" },
234         { "rx_128_to_255_octet_packets" },
235         { "rx_256_to_511_octet_packets" },
236         { "rx_512_to_1023_octet_packets" },
237         { "rx_1024_to_1522_octet_packets" },
238         { "rx_1523_to_2047_octet_packets" },
239         { "rx_2048_to_4095_octet_packets" },
240         { "rx_4096_to_8191_octet_packets" },
241         { "rx_8192_to_9022_octet_packets" },
242
243         { "tx_octets" },
244         { "tx_collisions" },
245
246         { "tx_xon_sent" },
247         { "tx_xoff_sent" },
248         { "tx_flow_control" },
249         { "tx_mac_errors" },
250         { "tx_single_collisions" },
251         { "tx_mult_collisions" },
252         { "tx_deferred" },
253         { "tx_excessive_collisions" },
254         { "tx_late_collisions" },
255         { "tx_collide_2times" },
256         { "tx_collide_3times" },
257         { "tx_collide_4times" },
258         { "tx_collide_5times" },
259         { "tx_collide_6times" },
260         { "tx_collide_7times" },
261         { "tx_collide_8times" },
262         { "tx_collide_9times" },
263         { "tx_collide_10times" },
264         { "tx_collide_11times" },
265         { "tx_collide_12times" },
266         { "tx_collide_13times" },
267         { "tx_collide_14times" },
268         { "tx_collide_15times" },
269         { "tx_ucast_packets" },
270         { "tx_mcast_packets" },
271         { "tx_bcast_packets" },
272         { "tx_carrier_sense_errors" },
273         { "tx_discards" },
274         { "tx_errors" },
275
276         { "dma_writeq_full" },
277         { "dma_write_prioq_full" },
278         { "rxbds_empty" },
279         { "rx_discards" },
280         { "rx_errors" },
281         { "rx_threshold_hit" },
282
283         { "dma_readq_full" },
284         { "dma_read_prioq_full" },
285         { "tx_comp_queue_full" },
286
287         { "ring_set_send_prod_index" },
288         { "ring_status_update" },
289         { "nic_irqs" },
290         { "nic_avoided_irqs" },
291         { "nic_tx_threshold_hit" }
292 };
293
294 static const struct {
295         const char string[ETH_GSTRING_LEN];
296 } ethtool_test_keys[TG3_NUM_TEST] = {
297         { "nvram test     (online) " },
298         { "link test      (online) " },
299         { "register test  (offline)" },
300         { "memory test    (offline)" },
301         { "loopback test  (offline)" },
302         { "interrupt test (offline)" },
303 };
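/* Illustrative sketch, not from this part of the file: the string tables
 * above are handed to ethtool verbatim; a get_strings() implementation
 * (hypothetical name and placement here) simply copies the requested
 * table into the buffer supplied by the ethtool core.
 */
static inline void tg3_get_strings_example(struct net_device *dev,
                                           u32 stringset, u8 *buf)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
        case ETH_SS_TEST:
                memcpy(buf, ethtool_test_keys, sizeof(ethtool_test_keys));
                break;
        }
}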
304
305 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
306 {
307         writel(val, tp->regs + off);
308 }
309
310 static u32 tg3_read32(struct tg3 *tp, u32 off)
311 {
312         return (readl(tp->regs + off));
313 }
314
315 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
316 {
317         unsigned long flags;
318
319         spin_lock_irqsave(&tp->indirect_lock, flags);
320         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
321         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
322         spin_unlock_irqrestore(&tp->indirect_lock, flags);
323 }
324
325 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
326 {
327         writel(val, tp->regs + off);
328         readl(tp->regs + off);
329 }
330
331 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
332 {
333         unsigned long flags;
334         u32 val;
335
336         spin_lock_irqsave(&tp->indirect_lock, flags);
337         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
338         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
339         spin_unlock_irqrestore(&tp->indirect_lock, flags);
340         return val;
341 }
342
343 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
344 {
345         unsigned long flags;
346
347         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
348                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
349                                        TG3_64BIT_REG_LOW, val);
350                 return;
351         }
352         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
353                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
354                                        TG3_64BIT_REG_LOW, val);
355                 return;
356         }
357
358         spin_lock_irqsave(&tp->indirect_lock, flags);
359         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
360         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
361         spin_unlock_irqrestore(&tp->indirect_lock, flags);
362
363         /* In indirect mode when disabling interrupts, we also need
364          * to clear the interrupt bit in the GRC local ctrl register.
365          */
366         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
367             (val == 0x1)) {
368                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
369                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
370         }
371 }
372
373 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
374 {
375         unsigned long flags;
376         u32 val;
377
378         spin_lock_irqsave(&tp->indirect_lock, flags);
379         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
380         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
381         spin_unlock_irqrestore(&tp->indirect_lock, flags);
382         return val;
383 }
384
385 /* usec_wait specifies the wait time in usec when writing to certain registers
386  * where it is unsafe to read back the register without some delay.
387  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
388  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
389  */
390 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
391 {
392         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
393             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
394                 /* Non-posted methods */
395                 tp->write32(tp, off, val);
396         else {
397                 /* Posted method */
398                 tg3_write32(tp, off, val);
399                 if (usec_wait)
400                         udelay(usec_wait);
401                 tp->read32(tp, off);
402         }
403         /* Wait again after the read for the posted method to guarantee that
404          * the wait time is met.
405          */
406         if (usec_wait)
407                 udelay(usec_wait);
408 }
409
410 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
411 {
412         tp->write32_mbox(tp, off, val);
413         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
414             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
415                 tp->read32_mbox(tp, off);
416 }
417
418 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
419 {
420         void __iomem *mbox = tp->regs + off;
421         writel(val, mbox);
422         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
423                 writel(val, mbox);
424         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
425                 readl(mbox);
426 }
427
428 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
429 {
430         return (readl(tp->regs + off + GRCMBOX_BASE));
431 }
432
433 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
434 {
435         writel(val, tp->regs + off + GRCMBOX_BASE);
436 }
437
438 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
439 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
440 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
441 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
442 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
443
444 #define tw32(reg,val)           tp->write32(tp, reg, val)
445 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
446 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
447 #define tr32(reg)               tp->read32(tp, reg)
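/* Illustrative sketch, not from the original driver: how the accessor
 * macros above are typically used.  tw32() is a plain posted write,
 * tw32_f() flushes it with a read-back, and tw32_wait_f() also enforces
 * a delay, which tg3_frob_aux_power() below relies on when toggling
 * GPIOs.  The helper name and the gpio_bits parameter are hypothetical.
 */
static inline void tg3_pulse_gpio_example(struct tg3 *tp, u32 gpio_bits)
{
        /* Assert the GPIO outputs and hold them for 100 usec; the wait
         * variant guarantees the write reaches the chip before timing.
         */
        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | gpio_bits, 100);

        /* Drop them again; a flushed write without a hold is enough. */
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
}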
448
449 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
450 {
451         unsigned long flags;
452
453         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
454             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
455                 return;
456
457         spin_lock_irqsave(&tp->indirect_lock, flags);
458         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
459                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
460                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
461
462                 /* Always leave this as zero. */
463                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
464         } else {
465                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
466                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
467
468                 /* Always leave this as zero. */
469                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
470         }
471         spin_unlock_irqrestore(&tp->indirect_lock, flags);
472 }
473
474 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
475 {
476         unsigned long flags;
477
478         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
479             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
480                 *val = 0;
481                 return;
482         }
483
484         spin_lock_irqsave(&tp->indirect_lock, flags);
485         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
486                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
487                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
488
489                 /* Always leave this as zero. */
490                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
491         } else {
492                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
493                 *val = tr32(TG3PCI_MEM_WIN_DATA);
494
495                 /* Always leave this as zero. */
496                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
497         }
498         spin_unlock_irqrestore(&tp->indirect_lock, flags);
499 }
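/* Illustrative sketch, not part of the original driver: polling a
 * firmware mailbox in NIC SRAM with the helpers above, much like
 * tg3_set_power_state() later does while waiting for ASF firmware.
 * The helper name is hypothetical.
 */
static inline int tg3_poll_fw_mbox_example(struct tg3 *tp)
{
        u32 val;
        int i;

        for (i = 0; i < 200; i++) {
                tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        return 0;       /* firmware has acknowledged */
                msleep(1);
        }
        return -ETIMEDOUT;
}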
500
501 static void tg3_disable_ints(struct tg3 *tp)
502 {
503         tw32(TG3PCI_MISC_HOST_CTRL,
504              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
505         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
506 }
507
508 static inline void tg3_cond_int(struct tg3 *tp)
509 {
510         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
511             (tp->hw_status->status & SD_STATUS_UPDATED))
512                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
513         else
514                 tw32(HOSTCC_MODE, tp->coalesce_mode |
515                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
516 }
517
518 static void tg3_enable_ints(struct tg3 *tp)
519 {
520         tp->irq_sync = 0;
521         wmb();
522
523         tw32(TG3PCI_MISC_HOST_CTRL,
524              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
525         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
526                        (tp->last_tag << 24));
527         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
528                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
529                                (tp->last_tag << 24));
530         tg3_cond_int(tp);
531 }
532
533 static inline unsigned int tg3_has_work(struct tg3 *tp)
534 {
535         struct tg3_hw_status *sblk = tp->hw_status;
536         unsigned int work_exists = 0;
537
538         /* check for phy events */
539         if (!(tp->tg3_flags &
540               (TG3_FLAG_USE_LINKCHG_REG |
541                TG3_FLAG_POLL_SERDES))) {
542                 if (sblk->status & SD_STATUS_LINK_CHG)
543                         work_exists = 1;
544         }
545         /* check for RX/TX work to do */
546         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
547             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
548                 work_exists = 1;
549
550         return work_exists;
551 }
552
553 /* tg3_restart_ints
554  *  similar to tg3_enable_ints, but it accurately determines whether there
555  *  is new work pending and can return without flushing the PIO write
556  *  which reenables interrupts
557  */
558 static void tg3_restart_ints(struct tg3 *tp)
559 {
560         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
561                      tp->last_tag << 24);
562         mmiowb();
563
564         /* When doing tagged status, this work check is unnecessary.
565          * The last_tag we write above tells the chip which piece of
566          * work we've completed.
567          */
568         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
569             tg3_has_work(tp))
570                 tw32(HOSTCC_MODE, tp->coalesce_mode |
571                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
572 }
573
574 static inline void tg3_netif_stop(struct tg3 *tp)
575 {
576         tp->dev->trans_start = jiffies; /* prevent tx timeout */
577         netif_poll_disable(tp->dev);
578         netif_tx_disable(tp->dev);
579 }
580
581 static inline void tg3_netif_start(struct tg3 *tp)
582 {
583         netif_wake_queue(tp->dev);
584         /* NOTE: unconditional netif_wake_queue is only appropriate
585          * so long as all callers are assured to have free tx slots
586          * (such as after tg3_init_hw)
587          */
588         netif_poll_enable(tp->dev);
589         tp->hw_status->status |= SD_STATUS_UPDATED;
590         tg3_enable_ints(tp);
591 }
592
593 static void tg3_switch_clocks(struct tg3 *tp)
594 {
595         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
596         u32 orig_clock_ctrl;
597
598         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
599                 return;
600
601         orig_clock_ctrl = clock_ctrl;
602         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
603                        CLOCK_CTRL_CLKRUN_OENABLE |
604                        0x1f);
605         tp->pci_clock_ctrl = clock_ctrl;
606
607         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
608                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
609                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
610                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
611                 }
612         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
613                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
614                             clock_ctrl |
615                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
616                             40);
617                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
618                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
619                             40);
620         }
621         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
622 }
623
624 #define PHY_BUSY_LOOPS  5000
625
626 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
627 {
628         u32 frame_val;
629         unsigned int loops;
630         int ret;
631
632         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
633                 tw32_f(MAC_MI_MODE,
634                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
635                 udelay(80);
636         }
637
638         *val = 0x0;
639
640         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
641                       MI_COM_PHY_ADDR_MASK);
642         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
643                       MI_COM_REG_ADDR_MASK);
644         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
645
646         tw32_f(MAC_MI_COM, frame_val);
647
648         loops = PHY_BUSY_LOOPS;
649         while (loops != 0) {
650                 udelay(10);
651                 frame_val = tr32(MAC_MI_COM);
652
653                 if ((frame_val & MI_COM_BUSY) == 0) {
654                         udelay(5);
655                         frame_val = tr32(MAC_MI_COM);
656                         break;
657                 }
658                 loops -= 1;
659         }
660
661         ret = -EBUSY;
662         if (loops != 0) {
663                 *val = frame_val & MI_COM_DATA_MASK;
664                 ret = 0;
665         }
666
667         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
668                 tw32_f(MAC_MI_MODE, tp->mi_mode);
669                 udelay(80);
670         }
671
672         return ret;
673 }
674
675 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
676 {
677         u32 frame_val;
678         unsigned int loops;
679         int ret;
680
681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
682             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
683                 return 0;
684
685         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
686                 tw32_f(MAC_MI_MODE,
687                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
688                 udelay(80);
689         }
690
691         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
692                       MI_COM_PHY_ADDR_MASK);
693         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
694                       MI_COM_REG_ADDR_MASK);
695         frame_val |= (val & MI_COM_DATA_MASK);
696         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
697
698         tw32_f(MAC_MI_COM, frame_val);
699
700         loops = PHY_BUSY_LOOPS;
701         while (loops != 0) {
702                 udelay(10);
703                 frame_val = tr32(MAC_MI_COM);
704                 if ((frame_val & MI_COM_BUSY) == 0) {
705                         udelay(5);
706                         frame_val = tr32(MAC_MI_COM);
707                         break;
708                 }
709                 loops -= 1;
710         }
711
712         ret = -EBUSY;
713         if (loops != 0)
714                 ret = 0;
715
716         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
717                 tw32_f(MAC_MI_MODE, tp->mi_mode);
718                 udelay(80);
719         }
720
721         return ret;
722 }
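/* Illustrative sketch, not part of the original driver: a typical caller
 * of the MDIO helpers above.  The link bit in MII_BMSR is latched low,
 * so it is read twice, just as tg3_phy_reset() does further down.  The
 * helper name is hypothetical.
 */
static inline int tg3_phy_link_up_example(struct tg3 *tp)
{
        u32 bmsr;

        /* First read returns the latched value, second read is current. */
        if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
            tg3_readphy(tp, MII_BMSR, &bmsr))
                return 0;       /* treat an MDIO error as link down */

        return (bmsr & BMSR_LSTATUS) != 0;
}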
723
724 static void tg3_phy_set_wirespeed(struct tg3 *tp)
725 {
726         u32 val;
727
728         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
729                 return;
730
731         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
732             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
733                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
734                              (val | (1 << 15) | (1 << 4)));
735 }
736
737 static int tg3_bmcr_reset(struct tg3 *tp)
738 {
739         u32 phy_control;
740         int limit, err;
741
742         /* OK, reset it, and poll the BMCR_RESET bit until it
743          * clears or we time out.
744          */
745         phy_control = BMCR_RESET;
746         err = tg3_writephy(tp, MII_BMCR, phy_control);
747         if (err != 0)
748                 return -EBUSY;
749
750         limit = 5000;
751         while (limit--) {
752                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
753                 if (err != 0)
754                         return -EBUSY;
755
756                 if ((phy_control & BMCR_RESET) == 0) {
757                         udelay(40);
758                         break;
759                 }
760                 udelay(10);
761         }
762         if (limit < 0)
763                 return -EBUSY;
764
765         return 0;
766 }
767
768 static int tg3_wait_macro_done(struct tg3 *tp)
769 {
770         int limit = 100;
771
772         while (limit--) {
773                 u32 tmp32;
774
775                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
776                         if ((tmp32 & 0x1000) == 0)
777                                 break;
778                 }
779         }
780         if (limit < 0)
781                 return -EBUSY;
782
783         return 0;
784 }
785
786 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
787 {
788         static const u32 test_pat[4][6] = {
789         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
790         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
791         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
792         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
793         };
794         int chan;
795
796         for (chan = 0; chan < 4; chan++) {
797                 int i;
798
799                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
800                              (chan * 0x2000) | 0x0200);
801                 tg3_writephy(tp, 0x16, 0x0002);
802
803                 for (i = 0; i < 6; i++)
804                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
805                                      test_pat[chan][i]);
806
807                 tg3_writephy(tp, 0x16, 0x0202);
808                 if (tg3_wait_macro_done(tp)) {
809                         *resetp = 1;
810                         return -EBUSY;
811                 }
812
813                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
814                              (chan * 0x2000) | 0x0200);
815                 tg3_writephy(tp, 0x16, 0x0082);
816                 if (tg3_wait_macro_done(tp)) {
817                         *resetp = 1;
818                         return -EBUSY;
819                 }
820
821                 tg3_writephy(tp, 0x16, 0x0802);
822                 if (tg3_wait_macro_done(tp)) {
823                         *resetp = 1;
824                         return -EBUSY;
825                 }
826
827                 for (i = 0; i < 6; i += 2) {
828                         u32 low, high;
829
830                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
831                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
832                             tg3_wait_macro_done(tp)) {
833                                 *resetp = 1;
834                                 return -EBUSY;
835                         }
836                         low &= 0x7fff;
837                         high &= 0x000f;
838                         if (low != test_pat[chan][i] ||
839                             high != test_pat[chan][i+1]) {
840                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
841                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
842                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
843
844                                 return -EBUSY;
845                         }
846                 }
847         }
848
849         return 0;
850 }
851
852 static int tg3_phy_reset_chanpat(struct tg3 *tp)
853 {
854         int chan;
855
856         for (chan = 0; chan < 4; chan++) {
857                 int i;
858
859                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
860                              (chan * 0x2000) | 0x0200);
861                 tg3_writephy(tp, 0x16, 0x0002);
862                 for (i = 0; i < 6; i++)
863                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
864                 tg3_writephy(tp, 0x16, 0x0202);
865                 if (tg3_wait_macro_done(tp))
866                         return -EBUSY;
867         }
868
869         return 0;
870 }
871
872 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
873 {
874         u32 reg32, phy9_orig;
875         int retries, do_phy_reset, err;
876
877         retries = 10;
878         do_phy_reset = 1;
879         do {
880                 if (do_phy_reset) {
881                         err = tg3_bmcr_reset(tp);
882                         if (err)
883                                 return err;
884                         do_phy_reset = 0;
885                 }
886
887                 /* Disable transmitter and interrupt.  */
888                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
889                         continue;
890
891                 reg32 |= 0x3000;
892                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
893
894                 /* Set full-duplex, 1000 mbps.  */
895                 tg3_writephy(tp, MII_BMCR,
896                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
897
898                 /* Set to master mode.  */
899                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
900                         continue;
901
902                 tg3_writephy(tp, MII_TG3_CTRL,
903                              (MII_TG3_CTRL_AS_MASTER |
904                               MII_TG3_CTRL_ENABLE_AS_MASTER));
905
906                 /* Enable SM_DSP_CLOCK and 6dB.  */
907                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
908
909                 /* Block the PHY control access.  */
910                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
911                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
912
913                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
914                 if (!err)
915                         break;
916         } while (--retries);
917
918         err = tg3_phy_reset_chanpat(tp);
919         if (err)
920                 return err;
921
922         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
923         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
924
925         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
926         tg3_writephy(tp, 0x16, 0x0000);
927
928         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
929             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
930                 /* Set Extended packet length bit for jumbo frames */
931                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
932         }
933         else {
934                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
935         }
936
937         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
938
939         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
940                 reg32 &= ~0x3000;
941                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
942         } else if (!err)
943                 err = -EBUSY;
944
945         return err;
946 }
947
948 static void tg3_link_report(struct tg3 *);
949
950 /* This will reset the tigon3 PHY and then reapply the
951  * chip-specific PHY workarounds and settings.
952  */
953 static int tg3_phy_reset(struct tg3 *tp)
954 {
955         u32 phy_status;
956         int err;
957
958         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
959                 u32 val;
960
961                 val = tr32(GRC_MISC_CFG);
962                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
963                 udelay(40);
964         }
965         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
966         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
967         if (err != 0)
968                 return -EBUSY;
969
970         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
971                 netif_carrier_off(tp->dev);
972                 tg3_link_report(tp);
973         }
974
975         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
977             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
978                 err = tg3_phy_reset_5703_4_5(tp);
979                 if (err)
980                         return err;
981                 goto out;
982         }
983
984         err = tg3_bmcr_reset(tp);
985         if (err)
986                 return err;
987
988 out:
989         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
990                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
991                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
992                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
993                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
994                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
995                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
996         }
997         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
998                 tg3_writephy(tp, 0x1c, 0x8d68);
999                 tg3_writephy(tp, 0x1c, 0x8d68);
1000         }
1001         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1002                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1003                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1004                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1005                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1006                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1007                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1008                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1009                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1010         }
1011         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1012                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1013                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1014                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1015                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1016                         tg3_writephy(tp, MII_TG3_TEST1,
1017                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1018                 } else
1019                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1020                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1021         }
1022         /* Set Extended packet length bit (bit 14) on all chips that */
1023         /* support jumbo frames */
1024         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1025                 /* Cannot do read-modify-write on 5401 */
1026                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1027         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1028                 u32 phy_reg;
1029
1030                 /* Set bit 14 with read-modify-write to preserve other bits */
1031                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1032                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1033                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1034         }
1035
1036         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1037          * jumbo frames transmission.
1038          */
1039         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1040                 u32 phy_reg;
1041
1042                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1043                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1044                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1045         }
1046
1047         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1048                 u32 phy_reg;
1049
1050                 /* adjust output voltage */
1051                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1052
1053                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
1054                         u32 phy_reg2;
1055
1056                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1057                                      phy_reg | MII_TG3_EPHY_SHADOW_EN);
1058                         /* Enable auto-MDIX */
1059                         if (!tg3_readphy(tp, 0x10, &phy_reg2))
1060                                 tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
1061                         tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
1062                 }
1063         }
1064
1065         tg3_phy_set_wirespeed(tp);
1066         return 0;
1067 }
1068
1069 static void tg3_frob_aux_power(struct tg3 *tp)
1070 {
1071         struct tg3 *tp_peer = tp;
1072
1073         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1074                 return;
1075
1076         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1077             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1078                 struct net_device *dev_peer;
1079
1080                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1081                 /* remove_one() may have been run on the peer. */
1082                 if (!dev_peer)
1083                         tp_peer = tp;
1084                 else
1085                         tp_peer = netdev_priv(dev_peer);
1086         }
1087
1088         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1089             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1090             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1091             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1092                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1093                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1094                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1095                                     (GRC_LCLCTRL_GPIO_OE0 |
1096                                      GRC_LCLCTRL_GPIO_OE1 |
1097                                      GRC_LCLCTRL_GPIO_OE2 |
1098                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1099                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1100                                     100);
1101                 } else {
1102                         u32 no_gpio2;
1103                         u32 grc_local_ctrl = 0;
1104
1105                         if (tp_peer != tp &&
1106                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1107                                 return;
1108
1109                         /* Workaround to prevent overdrawing Amps. */
1110                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1111                             ASIC_REV_5714) {
1112                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1113                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1114                                             grc_local_ctrl, 100);
1115                         }
1116
1117                         /* On 5753 and variants, GPIO2 cannot be used. */
1118                         no_gpio2 = tp->nic_sram_data_cfg &
1119                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1120
1121                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1122                                          GRC_LCLCTRL_GPIO_OE1 |
1123                                          GRC_LCLCTRL_GPIO_OE2 |
1124                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1125                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1126                         if (no_gpio2) {
1127                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1128                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1129                         }
1130                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131                                                     grc_local_ctrl, 100);
1132
1133                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1134
1135                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1136                                                     grc_local_ctrl, 100);
1137
1138                         if (!no_gpio2) {
1139                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1140                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1141                                             grc_local_ctrl, 100);
1142                         }
1143                 }
1144         } else {
1145                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1146                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1147                         if (tp_peer != tp &&
1148                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1149                                 return;
1150
1151                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1152                                     (GRC_LCLCTRL_GPIO_OE1 |
1153                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1154
1155                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1156                                     GRC_LCLCTRL_GPIO_OE1, 100);
1157
1158                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1159                                     (GRC_LCLCTRL_GPIO_OE1 |
1160                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1161                 }
1162         }
1163 }
1164
1165 static int tg3_setup_phy(struct tg3 *, int);
1166
1167 #define RESET_KIND_SHUTDOWN     0
1168 #define RESET_KIND_INIT         1
1169 #define RESET_KIND_SUSPEND      2
1170
1171 static void tg3_write_sig_post_reset(struct tg3 *, int);
1172 static int tg3_halt_cpu(struct tg3 *, u32);
1173 static int tg3_nvram_lock(struct tg3 *);
1174 static void tg3_nvram_unlock(struct tg3 *);
1175
1176 static void tg3_power_down_phy(struct tg3 *tp)
1177 {
1178         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1179                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1180                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1181                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1182
1183                         sg_dig_ctrl |=
1184                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1185                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1186                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1187                 }
1188                 return;
1189         }
1190
1191         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1192                 u32 val;
1193
1194                 tg3_bmcr_reset(tp);
1195                 val = tr32(GRC_MISC_CFG);
1196                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1197                 udelay(40);
1198                 return;
1199         } else {
1200                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1201                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1202                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1203         }
1204
1205         /* The PHY should not be powered down on some chips because
1206          * of bugs.
1207          */
1208         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1209             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1210             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1211              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1212                 return;
1213         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1214 }
1215
1216 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1217 {
1218         u32 misc_host_ctrl;
1219         u16 power_control, power_caps;
1220         int pm = tp->pm_cap;
1221
1222         /* Make sure register accesses (indirect or otherwise)
1223          * will function correctly.
1224          */
1225         pci_write_config_dword(tp->pdev,
1226                                TG3PCI_MISC_HOST_CTRL,
1227                                tp->misc_host_ctrl);
1228
1229         pci_read_config_word(tp->pdev,
1230                              pm + PCI_PM_CTRL,
1231                              &power_control);
1232         power_control |= PCI_PM_CTRL_PME_STATUS;
1233         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1234         switch (state) {
1235         case PCI_D0:
1236                 power_control |= 0;
1237                 pci_write_config_word(tp->pdev,
1238                                       pm + PCI_PM_CTRL,
1239                                       power_control);
1240                 udelay(100);    /* Delay after power state change */
1241
1242                 /* Switch out of Vaux if it is a NIC */
1243                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1244                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1245
1246                 return 0;
1247
1248         case PCI_D1:
1249                 power_control |= 1;
1250                 break;
1251
1252         case PCI_D2:
1253                 power_control |= 2;
1254                 break;
1255
1256         case PCI_D3hot:
1257                 power_control |= 3;
1258                 break;
1259
1260         default:
1261                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1262                        "requested.\n",
1263                        tp->dev->name, state);
1264                 return -EINVAL;
1265         }
1266
1267         power_control |= PCI_PM_CTRL_PME_ENABLE;
1268
1269         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1270         tw32(TG3PCI_MISC_HOST_CTRL,
1271              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1272
1273         if (tp->link_config.phy_is_low_power == 0) {
1274                 tp->link_config.phy_is_low_power = 1;
1275                 tp->link_config.orig_speed = tp->link_config.speed;
1276                 tp->link_config.orig_duplex = tp->link_config.duplex;
1277                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1278         }
1279
1280         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1281                 tp->link_config.speed = SPEED_10;
1282                 tp->link_config.duplex = DUPLEX_HALF;
1283                 tp->link_config.autoneg = AUTONEG_ENABLE;
1284                 tg3_setup_phy(tp, 0);
1285         }
1286
1287         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1288                 u32 val;
1289
1290                 val = tr32(GRC_VCPU_EXT_CTRL);
1291                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1292         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1293                 int i;
1294                 u32 val;
1295
1296                 for (i = 0; i < 200; i++) {
1297                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1298                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1299                                 break;
1300                         msleep(1);
1301                 }
1302         }
1303         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1304                                              WOL_DRV_STATE_SHUTDOWN |
1305                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1306
1307         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1308
1309         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1310                 u32 mac_mode;
1311
1312                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1313                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1314                         udelay(40);
1315
1316                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1317                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1318                         else
1319                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1320
1321                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1322                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1323                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1324                 } else {
1325                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1326                 }
1327
1328                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1329                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1330
1331                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1332                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1333                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1334
1335                 tw32_f(MAC_MODE, mac_mode);
1336                 udelay(100);
1337
1338                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1339                 udelay(10);
1340         }
1341
1342         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1343             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1344              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1345                 u32 base_val;
1346
1347                 base_val = tp->pci_clock_ctrl;
1348                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1349                              CLOCK_CTRL_TXCLK_DISABLE);
1350
1351                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1352                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1353         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1354                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1355                 /* do nothing */
1356         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1357                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1358                 u32 newbits1, newbits2;
1359
1360                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1361                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1362                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1363                                     CLOCK_CTRL_TXCLK_DISABLE |
1364                                     CLOCK_CTRL_ALTCLK);
1365                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1366                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1367                         newbits1 = CLOCK_CTRL_625_CORE;
1368                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1369                 } else {
1370                         newbits1 = CLOCK_CTRL_ALTCLK;
1371                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1372                 }
1373
1374                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1375                             40);
1376
1377                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1378                             40);
1379
1380                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1381                         u32 newbits3;
1382
1383                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1384                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1385                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1386                                             CLOCK_CTRL_TXCLK_DISABLE |
1387                                             CLOCK_CTRL_44MHZ_CORE);
1388                         } else {
1389                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1390                         }
1391
1392                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1393                                     tp->pci_clock_ctrl | newbits3, 40);
1394                 }
1395         }
1396
1397         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1398             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1399                 tg3_power_down_phy(tp);
1400
1401         tg3_frob_aux_power(tp);
1402
1403         /* Workaround for unstable PLL clock */
1404         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1405             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1406                 u32 val = tr32(0x7d00);
1407
1408                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1409                 tw32(0x7d00, val);
1410                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1411                         int err;
1412
1413                         err = tg3_nvram_lock(tp);
1414                         tg3_halt_cpu(tp, RX_CPU_BASE);
1415                         if (!err)
1416                                 tg3_nvram_unlock(tp);
1417                 }
1418         }
1419
1420         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1421
1422         /* Finally, set the new power state. */
1423         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1424         udelay(100);    /* Delay after power state change */
1425
1426         return 0;
1427 }
1428
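/* Log the current link state: down, or up with speed, duplex and the
 * negotiated TX/RX flow-control settings.  Messages are gated on the
 * NETIF_MSG_LINK bit of the driver's message level.
 */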
1429 static void tg3_link_report(struct tg3 *tp)
1430 {
1431         if (!netif_carrier_ok(tp->dev)) {
1432                 if (netif_msg_link(tp))
1433                         printk(KERN_INFO PFX "%s: Link is down.\n",
1434                                tp->dev->name);
1435         } else if (netif_msg_link(tp)) {
1436                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1437                        tp->dev->name,
1438                        (tp->link_config.active_speed == SPEED_1000 ?
1439                         1000 :
1440                         (tp->link_config.active_speed == SPEED_100 ?
1441                          100 : 10)),
1442                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1443                         "full" : "half"));
1444
1445                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1446                        "%s for RX.\n",
1447                        tp->dev->name,
1448                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1449                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1450         }
1451 }
1452
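/* Resolve flow control from the local and link-partner advertisements
 * (or keep the current settings when pause autonegotiation is disabled)
 * and program the RX/TX MAC flow-control enable bits, touching the
 * registers only when the resolved mode actually changed.
 */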
1453 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1454 {
1455         u32 new_tg3_flags = 0;
1456         u32 old_rx_mode = tp->rx_mode;
1457         u32 old_tx_mode = tp->tx_mode;
1458
1459         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1460
1461                 /* Convert 1000BaseX flow control bits to 1000BaseT
1462                  * bits before resolving flow control.
1463                  */
1464                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1465                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1466                                        ADVERTISE_PAUSE_ASYM);
1467                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1468
1469                         if (local_adv & ADVERTISE_1000XPAUSE)
1470                                 local_adv |= ADVERTISE_PAUSE_CAP;
1471                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1472                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1473                         if (remote_adv & LPA_1000XPAUSE)
1474                                 remote_adv |= LPA_PAUSE_CAP;
1475                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1476                                 remote_adv |= LPA_PAUSE_ASYM;
1477                 }
1478
1479                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1480                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1481                                 if (remote_adv & LPA_PAUSE_CAP)
1482                                         new_tg3_flags |=
1483                                                 (TG3_FLAG_RX_PAUSE |
1484                                                 TG3_FLAG_TX_PAUSE);
1485                                 else if (remote_adv & LPA_PAUSE_ASYM)
1486                                         new_tg3_flags |=
1487                                                 (TG3_FLAG_RX_PAUSE);
1488                         } else {
1489                                 if (remote_adv & LPA_PAUSE_CAP)
1490                                         new_tg3_flags |=
1491                                                 (TG3_FLAG_RX_PAUSE |
1492                                                 TG3_FLAG_TX_PAUSE);
1493                         }
1494                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1495                         if ((remote_adv & LPA_PAUSE_CAP) &&
1496                             (remote_adv & LPA_PAUSE_ASYM))
1497                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1498                 }
1499
1500                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1501                 tp->tg3_flags |= new_tg3_flags;
1502         } else {
1503                 new_tg3_flags = tp->tg3_flags;
1504         }
1505
1506         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1507                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1508         else
1509                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1510
1511         if (old_rx_mode != tp->rx_mode) {
1512                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1513         }
1514
1515         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1516                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1517         else
1518                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1519
1520         if (old_tx_mode != tp->tx_mode) {
1521                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1522         }
1523 }
1524
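/* Decode the speed/duplex field of the PHY auxiliary status register.
 * The 5906 encodes its 10/100 result differently and is handled in the
 * default case.
 */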
1525 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1526 {
1527         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1528         case MII_TG3_AUX_STAT_10HALF:
1529                 *speed = SPEED_10;
1530                 *duplex = DUPLEX_HALF;
1531                 break;
1532
1533         case MII_TG3_AUX_STAT_10FULL:
1534                 *speed = SPEED_10;
1535                 *duplex = DUPLEX_FULL;
1536                 break;
1537
1538         case MII_TG3_AUX_STAT_100HALF:
1539                 *speed = SPEED_100;
1540                 *duplex = DUPLEX_HALF;
1541                 break;
1542
1543         case MII_TG3_AUX_STAT_100FULL:
1544                 *speed = SPEED_100;
1545                 *duplex = DUPLEX_FULL;
1546                 break;
1547
1548         case MII_TG3_AUX_STAT_1000HALF:
1549                 *speed = SPEED_1000;
1550                 *duplex = DUPLEX_HALF;
1551                 break;
1552
1553         case MII_TG3_AUX_STAT_1000FULL:
1554                 *speed = SPEED_1000;
1555                 *duplex = DUPLEX_FULL;
1556                 break;
1557
1558         default:
1559                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1560                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1561                                  SPEED_10;
1562                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1563                                   DUPLEX_HALF;
1564                         break;
1565                 }
1566                 *speed = SPEED_INVALID;
1567                 *duplex = DUPLEX_INVALID;
1568                 break;
1569         }
1570 }
1571
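/* Program the copper PHY advertisement registers according to the
 * requested link configuration (limited to low speeds when the PHY is
 * being put into low-power mode), then either force the speed/duplex
 * through BMCR or restart autonegotiation.
 */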
1572 static void tg3_phy_copper_begin(struct tg3 *tp)
1573 {
1574         u32 new_adv;
1575         int i;
1576
1577         if (tp->link_config.phy_is_low_power) {
1578                 /* Entering low power mode.  Disable gigabit and
1579                  * 100baseT advertisements.
1580                  */
1581                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1582
1583                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1584                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1585                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1586                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1587
1588                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1589         } else if (tp->link_config.speed == SPEED_INVALID) {
1590                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1591                         tp->link_config.advertising &=
1592                                 ~(ADVERTISED_1000baseT_Half |
1593                                   ADVERTISED_1000baseT_Full);
1594
1595                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1596                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1597                         new_adv |= ADVERTISE_10HALF;
1598                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1599                         new_adv |= ADVERTISE_10FULL;
1600                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1601                         new_adv |= ADVERTISE_100HALF;
1602                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1603                         new_adv |= ADVERTISE_100FULL;
1604                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1605
1606                 if (tp->link_config.advertising &
1607                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1608                         new_adv = 0;
1609                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1610                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1611                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1612                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1613                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1614                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1615                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1616                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1617                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1618                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1619                 } else {
1620                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1621                 }
1622         } else {
1623                 /* Asking for a specific link mode. */
1624                 if (tp->link_config.speed == SPEED_1000) {
1625                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1626                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1627
1628                         if (tp->link_config.duplex == DUPLEX_FULL)
1629                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1630                         else
1631                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1632                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1633                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1634                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1635                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1636                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1637                 } else {
1638                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1639
1640                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1641                         if (tp->link_config.speed == SPEED_100) {
1642                                 if (tp->link_config.duplex == DUPLEX_FULL)
1643                                         new_adv |= ADVERTISE_100FULL;
1644                                 else
1645                                         new_adv |= ADVERTISE_100HALF;
1646                         } else {
1647                                 if (tp->link_config.duplex == DUPLEX_FULL)
1648                                         new_adv |= ADVERTISE_10FULL;
1649                                 else
1650                                         new_adv |= ADVERTISE_10HALF;
1651                         }
1652                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1653                 }
1654         }
1655
1656         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1657             tp->link_config.speed != SPEED_INVALID) {
1658                 u32 bmcr, orig_bmcr;
1659
1660                 tp->link_config.active_speed = tp->link_config.speed;
1661                 tp->link_config.active_duplex = tp->link_config.duplex;
1662
1663                 bmcr = 0;
1664                 switch (tp->link_config.speed) {
1665                 default:
1666                 case SPEED_10:
1667                         break;
1668
1669                 case SPEED_100:
1670                         bmcr |= BMCR_SPEED100;
1671                         break;
1672
1673                 case SPEED_1000:
1674                         bmcr |= TG3_BMCR_SPEED1000;
1675                         break;
1676                 }
1677
1678                 if (tp->link_config.duplex == DUPLEX_FULL)
1679                         bmcr |= BMCR_FULLDPLX;
1680
1681                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1682                     (bmcr != orig_bmcr)) {
1683                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1684                         for (i = 0; i < 1500; i++) {
1685                                 u32 tmp;
1686
1687                                 udelay(10);
1688                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1689                                     tg3_readphy(tp, MII_BMSR, &tmp))
1690                                         continue;
1691                                 if (!(tmp & BMSR_LSTATUS)) {
1692                                         udelay(40);
1693                                         break;
1694                                 }
1695                         }
1696                         tg3_writephy(tp, MII_BMCR, bmcr);
1697                         udelay(40);
1698                 }
1699         } else {
1700                 tg3_writephy(tp, MII_BMCR,
1701                              BMCR_ANENABLE | BMCR_ANRESTART);
1702         }
1703 }
1704
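/* BCM5401 PHY DSP setup; returns non-zero if any of the PHY writes failed. */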
1705 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1706 {
1707         int err;
1708
1709         /* Turn off tap power management. */
1710         /* Set Extended packet length bit */
1711         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1712
1713         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1714         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1715
1716         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1717         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1718
1719         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1720         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1721
1722         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1723         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1724
1725         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1726         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1727
1728         udelay(40);
1729
1730         return err;
1731 }
1732
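/* Return 1 if the PHY advertisement registers already cover every mode
 * requested in @mask, 0 otherwise (including on register read failure).
 */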
1733 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1734 {
1735         u32 adv_reg, all_mask = 0;
1736
1737         if (mask & ADVERTISED_10baseT_Half)
1738                 all_mask |= ADVERTISE_10HALF;
1739         if (mask & ADVERTISED_10baseT_Full)
1740                 all_mask |= ADVERTISE_10FULL;
1741         if (mask & ADVERTISED_100baseT_Half)
1742                 all_mask |= ADVERTISE_100HALF;
1743         if (mask & ADVERTISED_100baseT_Full)
1744                 all_mask |= ADVERTISE_100FULL;
1745
1746         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1747                 return 0;
1748
1749         if ((adv_reg & all_mask) != all_mask)
1750                 return 0;
1751         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1752                 u32 tg3_ctrl;
1753
1754                 all_mask = 0;
1755                 if (mask & ADVERTISED_1000baseT_Half)
1756                         all_mask |= ADVERTISE_1000HALF;
1757                 if (mask & ADVERTISED_1000baseT_Full)
1758                         all_mask |= ADVERTISE_1000FULL;
1759
1760                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1761                         return 0;
1762
1763                 if ((tg3_ctrl & all_mask) != all_mask)
1764                         return 0;
1765         }
1766         return 1;
1767 }
1768
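/* Bring up (or re-evaluate) the link on a copper PHY: apply chip and
 * PHY specific workarounds, read the negotiated speed/duplex, resolve
 * flow control, program the MAC mode and report carrier changes.
 */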
1769 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1770 {
1771         int current_link_up;
1772         u32 bmsr, dummy;
1773         u16 current_speed;
1774         u8 current_duplex;
1775         int i, err;
1776
1777         tw32(MAC_EVENT, 0);
1778
1779         tw32_f(MAC_STATUS,
1780              (MAC_STATUS_SYNC_CHANGED |
1781               MAC_STATUS_CFG_CHANGED |
1782               MAC_STATUS_MI_COMPLETION |
1783               MAC_STATUS_LNKSTATE_CHANGED));
1784         udelay(40);
1785
1786         tp->mi_mode = MAC_MI_MODE_BASE;
1787         tw32_f(MAC_MI_MODE, tp->mi_mode);
1788         udelay(80);
1789
1790         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1791
1792         /* Some third-party PHYs need to be reset on link going
1793          * down.
1794          */
1795         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1796              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1797              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1798             netif_carrier_ok(tp->dev)) {
1799                 tg3_readphy(tp, MII_BMSR, &bmsr);
1800                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1801                     !(bmsr & BMSR_LSTATUS))
1802                         force_reset = 1;
1803         }
1804         if (force_reset)
1805                 tg3_phy_reset(tp);
1806
1807         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1808                 tg3_readphy(tp, MII_BMSR, &bmsr);
1809                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1810                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1811                         bmsr = 0;
1812
1813                 if (!(bmsr & BMSR_LSTATUS)) {
1814                         err = tg3_init_5401phy_dsp(tp);
1815                         if (err)
1816                                 return err;
1817
1818                         tg3_readphy(tp, MII_BMSR, &bmsr);
1819                         for (i = 0; i < 1000; i++) {
1820                                 udelay(10);
1821                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1822                                     (bmsr & BMSR_LSTATUS)) {
1823                                         udelay(40);
1824                                         break;
1825                                 }
1826                         }
1827
1828                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1829                             !(bmsr & BMSR_LSTATUS) &&
1830                             tp->link_config.active_speed == SPEED_1000) {
1831                                 err = tg3_phy_reset(tp);
1832                                 if (!err)
1833                                         err = tg3_init_5401phy_dsp(tp);
1834                                 if (err)
1835                                         return err;
1836                         }
1837                 }
1838         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1839                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1840                 /* 5701 {A0,B0} CRC bug workaround */
1841                 tg3_writephy(tp, 0x15, 0x0a75);
1842                 tg3_writephy(tp, 0x1c, 0x8c68);
1843                 tg3_writephy(tp, 0x1c, 0x8d68);
1844                 tg3_writephy(tp, 0x1c, 0x8c68);
1845         }
1846
1847         /* Clear pending interrupts... */
1848         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1849         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1850
1851         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1852                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1853         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1854                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1855
1856         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1857             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1858                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1859                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1860                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1861                 else
1862                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1863         }
1864
1865         current_link_up = 0;
1866         current_speed = SPEED_INVALID;
1867         current_duplex = DUPLEX_INVALID;
1868
1869         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1870                 u32 val;
1871
1872                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1873                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1874                 if (!(val & (1 << 10))) {
1875                         val |= (1 << 10);
1876                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1877                         goto relink;
1878                 }
1879         }
1880
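        /* BMSR latches link-down events, so read it twice; the second
         * read reflects the current link state.
         */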
1881         bmsr = 0;
1882         for (i = 0; i < 100; i++) {
1883                 tg3_readphy(tp, MII_BMSR, &bmsr);
1884                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1885                     (bmsr & BMSR_LSTATUS))
1886                         break;
1887                 udelay(40);
1888         }
1889
1890         if (bmsr & BMSR_LSTATUS) {
1891                 u32 aux_stat, bmcr;
1892
1893                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1894                 for (i = 0; i < 2000; i++) {
1895                         udelay(10);
1896                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1897                             aux_stat)
1898                                 break;
1899                 }
1900
1901                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1902                                              &current_speed,
1903                                              &current_duplex);
1904
1905                 bmcr = 0;
1906                 for (i = 0; i < 200; i++) {
1907                         tg3_readphy(tp, MII_BMCR, &bmcr);
1908                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1909                                 continue;
1910                         if (bmcr && bmcr != 0x7fff)
1911                                 break;
1912                         udelay(10);
1913                 }
1914
1915                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1916                         if (bmcr & BMCR_ANENABLE) {
1917                                 current_link_up = 1;
1918
1919                                 /* Force autoneg restart if we are exiting
1920                                  * low power mode.
1921                                  */
1922                                 if (!tg3_copper_is_advertising_all(tp,
1923                                                 tp->link_config.advertising))
1924                                         current_link_up = 0;
1925                         } else {
1926                                 current_link_up = 0;
1927                         }
1928                 } else {
1929                         if (!(bmcr & BMCR_ANENABLE) &&
1930                             tp->link_config.speed == current_speed &&
1931                             tp->link_config.duplex == current_duplex) {
1932                                 current_link_up = 1;
1933                         } else {
1934                                 current_link_up = 0;
1935                         }
1936                 }
1937
1938                 tp->link_config.active_speed = current_speed;
1939                 tp->link_config.active_duplex = current_duplex;
1940         }
1941
1942         if (current_link_up == 1 &&
1943             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1944             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1945                 u32 local_adv, remote_adv;
1946
1947                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1948                         local_adv = 0;
1949                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1950
1951                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1952                         remote_adv = 0;
1953
1954                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1955
1956                 /* If we are not advertising full pause capability,
1957                  * something is wrong.  Bring the link down and reconfigure.
1958                  */
1959                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1960                         current_link_up = 0;
1961                 } else {
1962                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1963                 }
1964         }
1965 relink:
1966         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1967                 u32 tmp;
1968
1969                 tg3_phy_copper_begin(tp);
1970
1971                 tg3_readphy(tp, MII_BMSR, &tmp);
1972                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1973                     (tmp & BMSR_LSTATUS))
1974                         current_link_up = 1;
1975         }
1976
1977         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1978         if (current_link_up == 1) {
1979                 if (tp->link_config.active_speed == SPEED_100 ||
1980                     tp->link_config.active_speed == SPEED_10)
1981                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1982                 else
1983                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1984         } else
1985                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1986
1987         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1988         if (tp->link_config.active_duplex == DUPLEX_HALF)
1989                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1990
1991         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1992         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1993                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1994                     (current_link_up == 1 &&
1995                      tp->link_config.active_speed == SPEED_10))
1996                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1997         } else {
1998                 if (current_link_up == 1)
1999                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2000         }
2001
2002         /* ??? Without this setting Netgear GA302T PHY does not
2003          * ??? send/receive packets...
2004          */
2005         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2006             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2007                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2008                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2009                 udelay(80);
2010         }
2011
2012         tw32_f(MAC_MODE, tp->mac_mode);
2013         udelay(40);
2014
2015         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2016                 /* Polled via timer. */
2017                 tw32_f(MAC_EVENT, 0);
2018         } else {
2019                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2020         }
2021         udelay(40);
2022
2023         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2024             current_link_up == 1 &&
2025             tp->link_config.active_speed == SPEED_1000 &&
2026             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2027              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2028                 udelay(120);
2029                 tw32_f(MAC_STATUS,
2030                      (MAC_STATUS_SYNC_CHANGED |
2031                       MAC_STATUS_CFG_CHANGED));
2032                 udelay(40);
2033                 tg3_write_mem(tp,
2034                               NIC_SRAM_FIRMWARE_MBOX,
2035                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2036         }
2037
2038         if (current_link_up != netif_carrier_ok(tp->dev)) {
2039                 if (current_link_up)
2040                         netif_carrier_on(tp->dev);
2041                 else
2042                         netif_carrier_off(tp->dev);
2043                 tg3_link_report(tp);
2044         }
2045
2046         return 0;
2047 }
2048
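/* Software state for the 1000BASE-X autonegotiation state machine used
 * when the hardware autoneg block is not available.
 */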
2049 struct tg3_fiber_aneginfo {
2050         int state;
2051 #define ANEG_STATE_UNKNOWN              0
2052 #define ANEG_STATE_AN_ENABLE            1
2053 #define ANEG_STATE_RESTART_INIT         2
2054 #define ANEG_STATE_RESTART              3
2055 #define ANEG_STATE_DISABLE_LINK_OK      4
2056 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2057 #define ANEG_STATE_ABILITY_DETECT       6
2058 #define ANEG_STATE_ACK_DETECT_INIT      7
2059 #define ANEG_STATE_ACK_DETECT           8
2060 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2061 #define ANEG_STATE_COMPLETE_ACK         10
2062 #define ANEG_STATE_IDLE_DETECT_INIT     11
2063 #define ANEG_STATE_IDLE_DETECT          12
2064 #define ANEG_STATE_LINK_OK              13
2065 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2066 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2067
2068         u32 flags;
2069 #define MR_AN_ENABLE            0x00000001
2070 #define MR_RESTART_AN           0x00000002
2071 #define MR_AN_COMPLETE          0x00000004
2072 #define MR_PAGE_RX              0x00000008
2073 #define MR_NP_LOADED            0x00000010
2074 #define MR_TOGGLE_TX            0x00000020
2075 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2076 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2077 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2078 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2079 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2080 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2081 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2082 #define MR_TOGGLE_RX            0x00002000
2083 #define MR_NP_RX                0x00004000
2084
2085 #define MR_LINK_OK              0x80000000
2086
2087         unsigned long link_time, cur_time;
2088
2089         u32 ability_match_cfg;
2090         int ability_match_count;
2091
2092         char ability_match, idle_match, ack_match;
2093
2094         u32 txconfig, rxconfig;
2095 #define ANEG_CFG_NP             0x00000080
2096 #define ANEG_CFG_ACK            0x00000040
2097 #define ANEG_CFG_RF2            0x00000020
2098 #define ANEG_CFG_RF1            0x00000010
2099 #define ANEG_CFG_PS2            0x00000001
2100 #define ANEG_CFG_PS1            0x00008000
2101 #define ANEG_CFG_HD             0x00004000
2102 #define ANEG_CFG_FD             0x00002000
2103 #define ANEG_CFG_INVAL          0x00001f06
2104
2105 };
2106 #define ANEG_OK         0
2107 #define ANEG_DONE       1
2108 #define ANEG_TIMER_ENAB 2
2109 #define ANEG_FAILED     -1
2110
2111 #define ANEG_STATE_SETTLE_TIME  10000
2112
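/* Advance the software fiber autoneg state machine by one step, based
 * on the configuration word currently being received by the MAC.
 * Returns ANEG_OK, ANEG_TIMER_ENAB, ANEG_DONE or ANEG_FAILED.
 */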
2113 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2114                                    struct tg3_fiber_aneginfo *ap)
2115 {
2116         unsigned long delta;
2117         u32 rx_cfg_reg;
2118         int ret;
2119
2120         if (ap->state == ANEG_STATE_UNKNOWN) {
2121                 ap->rxconfig = 0;
2122                 ap->link_time = 0;
2123                 ap->cur_time = 0;
2124                 ap->ability_match_cfg = 0;
2125                 ap->ability_match_count = 0;
2126                 ap->ability_match = 0;
2127                 ap->idle_match = 0;
2128                 ap->ack_match = 0;
2129         }
2130         ap->cur_time++;
2131
2132         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2133                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2134
2135                 if (rx_cfg_reg != ap->ability_match_cfg) {
2136                         ap->ability_match_cfg = rx_cfg_reg;
2137                         ap->ability_match = 0;
2138                         ap->ability_match_count = 0;
2139                 } else {
2140                         if (++ap->ability_match_count > 1) {
2141                                 ap->ability_match = 1;
2142                                 ap->ability_match_cfg = rx_cfg_reg;
2143                         }
2144                 }
2145                 if (rx_cfg_reg & ANEG_CFG_ACK)
2146                         ap->ack_match = 1;
2147                 else
2148                         ap->ack_match = 0;
2149
2150                 ap->idle_match = 0;
2151         } else {
2152                 ap->idle_match = 1;
2153                 ap->ability_match_cfg = 0;
2154                 ap->ability_match_count = 0;
2155                 ap->ability_match = 0;
2156                 ap->ack_match = 0;
2157
2158                 rx_cfg_reg = 0;
2159         }
2160
2161         ap->rxconfig = rx_cfg_reg;
2162         ret = ANEG_OK;
2163
2164         switch(ap->state) {
2165         case ANEG_STATE_UNKNOWN:
2166                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2167                         ap->state = ANEG_STATE_AN_ENABLE;
2168
2169                 /* fallthru */
2170         case ANEG_STATE_AN_ENABLE:
2171                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2172                 if (ap->flags & MR_AN_ENABLE) {
2173                         ap->link_time = 0;
2174                         ap->cur_time = 0;
2175                         ap->ability_match_cfg = 0;
2176                         ap->ability_match_count = 0;
2177                         ap->ability_match = 0;
2178                         ap->idle_match = 0;
2179                         ap->ack_match = 0;
2180
2181                         ap->state = ANEG_STATE_RESTART_INIT;
2182                 } else {
2183                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2184                 }
2185                 break;
2186
2187         case ANEG_STATE_RESTART_INIT:
2188                 ap->link_time = ap->cur_time;
2189                 ap->flags &= ~(MR_NP_LOADED);
2190                 ap->txconfig = 0;
2191                 tw32(MAC_TX_AUTO_NEG, 0);
2192                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2193                 tw32_f(MAC_MODE, tp->mac_mode);
2194                 udelay(40);
2195
2196                 ret = ANEG_TIMER_ENAB;
2197                 ap->state = ANEG_STATE_RESTART;
2198
2199                 /* fallthru */
2200         case ANEG_STATE_RESTART:
2201                 delta = ap->cur_time - ap->link_time;
2202                 if (delta > ANEG_STATE_SETTLE_TIME) {
2203                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2204                 } else {
2205                         ret = ANEG_TIMER_ENAB;
2206                 }
2207                 break;
2208
2209         case ANEG_STATE_DISABLE_LINK_OK:
2210                 ret = ANEG_DONE;
2211                 break;
2212
2213         case ANEG_STATE_ABILITY_DETECT_INIT:
2214                 ap->flags &= ~(MR_TOGGLE_TX);
2215                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2216                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2217                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2218                 tw32_f(MAC_MODE, tp->mac_mode);
2219                 udelay(40);
2220
2221                 ap->state = ANEG_STATE_ABILITY_DETECT;
2222                 break;
2223
2224         case ANEG_STATE_ABILITY_DETECT:
2225                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2226                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2227                 }
2228                 break;
2229
2230         case ANEG_STATE_ACK_DETECT_INIT:
2231                 ap->txconfig |= ANEG_CFG_ACK;
2232                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2233                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2234                 tw32_f(MAC_MODE, tp->mac_mode);
2235                 udelay(40);
2236
2237                 ap->state = ANEG_STATE_ACK_DETECT;
2238
2239                 /* fallthru */
2240         case ANEG_STATE_ACK_DETECT:
2241                 if (ap->ack_match != 0) {
2242                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2243                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2244                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2245                         } else {
2246                                 ap->state = ANEG_STATE_AN_ENABLE;
2247                         }
2248                 } else if (ap->ability_match != 0 &&
2249                            ap->rxconfig == 0) {
2250                         ap->state = ANEG_STATE_AN_ENABLE;
2251                 }
2252                 break;
2253
2254         case ANEG_STATE_COMPLETE_ACK_INIT:
2255                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2256                         ret = ANEG_FAILED;
2257                         break;
2258                 }
2259                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2260                                MR_LP_ADV_HALF_DUPLEX |
2261                                MR_LP_ADV_SYM_PAUSE |
2262                                MR_LP_ADV_ASYM_PAUSE |
2263                                MR_LP_ADV_REMOTE_FAULT1 |
2264                                MR_LP_ADV_REMOTE_FAULT2 |
2265                                MR_LP_ADV_NEXT_PAGE |
2266                                MR_TOGGLE_RX |
2267                                MR_NP_RX);
2268                 if (ap->rxconfig & ANEG_CFG_FD)
2269                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2270                 if (ap->rxconfig & ANEG_CFG_HD)
2271                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2272                 if (ap->rxconfig & ANEG_CFG_PS1)
2273                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2274                 if (ap->rxconfig & ANEG_CFG_PS2)
2275                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2276                 if (ap->rxconfig & ANEG_CFG_RF1)
2277                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2278                 if (ap->rxconfig & ANEG_CFG_RF2)
2279                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2280                 if (ap->rxconfig & ANEG_CFG_NP)
2281                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2282
2283                 ap->link_time = ap->cur_time;
2284
2285                 ap->flags ^= (MR_TOGGLE_TX);
2286                 if (ap->rxconfig & 0x0008)
2287                         ap->flags |= MR_TOGGLE_RX;
2288                 if (ap->rxconfig & ANEG_CFG_NP)
2289                         ap->flags |= MR_NP_RX;
2290                 ap->flags |= MR_PAGE_RX;
2291
2292                 ap->state = ANEG_STATE_COMPLETE_ACK;
2293                 ret = ANEG_TIMER_ENAB;
2294                 break;
2295
2296         case ANEG_STATE_COMPLETE_ACK:
2297                 if (ap->ability_match != 0 &&
2298                     ap->rxconfig == 0) {
2299                         ap->state = ANEG_STATE_AN_ENABLE;
2300                         break;
2301                 }
2302                 delta = ap->cur_time - ap->link_time;
2303                 if (delta > ANEG_STATE_SETTLE_TIME) {
2304                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2305                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2306                         } else {
2307                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2308                                     !(ap->flags & MR_NP_RX)) {
2309                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2310                                 } else {
2311                                         ret = ANEG_FAILED;
2312                                 }
2313                         }
2314                 }
2315                 break;
2316
2317         case ANEG_STATE_IDLE_DETECT_INIT:
2318                 ap->link_time = ap->cur_time;
2319                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2320                 tw32_f(MAC_MODE, tp->mac_mode);
2321                 udelay(40);
2322
2323                 ap->state = ANEG_STATE_IDLE_DETECT;
2324                 ret = ANEG_TIMER_ENAB;
2325                 break;
2326
2327         case ANEG_STATE_IDLE_DETECT:
2328                 if (ap->ability_match != 0 &&
2329                     ap->rxconfig == 0) {
2330                         ap->state = ANEG_STATE_AN_ENABLE;
2331                         break;
2332                 }
2333                 delta = ap->cur_time - ap->link_time;
2334                 if (delta > ANEG_STATE_SETTLE_TIME) {
2335                         /* XXX another gem from the Broadcom driver :( */
2336                         ap->state = ANEG_STATE_LINK_OK;
2337                 }
2338                 break;
2339
2340         case ANEG_STATE_LINK_OK:
2341                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2342                 ret = ANEG_DONE;
2343                 break;
2344
2345         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2346                 /* ??? unimplemented */
2347                 break;
2348
2349         case ANEG_STATE_NEXT_PAGE_WAIT:
2350                 /* ??? unimplemented */
2351                 break;
2352
2353         default:
2354                 ret = ANEG_FAILED;
2355                 break;
2356         }
2357
2358         return ret;
2359 }
2360
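/* Drive the software autoneg state machine to completion (up to about
 * 195 ms in 1 us steps).  The resolved MR_* flags are returned through
 * *flags; the return value is 1 on successful negotiation.
 */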
2361 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2362 {
2363         int res = 0;
2364         struct tg3_fiber_aneginfo aninfo;
2365         int status = ANEG_FAILED;
2366         unsigned int tick;
2367         u32 tmp;
2368
2369         tw32_f(MAC_TX_AUTO_NEG, 0);
2370
2371         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2372         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2373         udelay(40);
2374
2375         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2376         udelay(40);
2377
2378         memset(&aninfo, 0, sizeof(aninfo));
2379         aninfo.flags |= MR_AN_ENABLE;
2380         aninfo.state = ANEG_STATE_UNKNOWN;
2381         aninfo.cur_time = 0;
2382         tick = 0;
2383         while (++tick < 195000) {
2384                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2385                 if (status == ANEG_DONE || status == ANEG_FAILED)
2386                         break;
2387
2388                 udelay(1);
2389         }
2390
2391         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2392         tw32_f(MAC_MODE, tp->mac_mode);
2393         udelay(40);
2394
2395         *flags = aninfo.flags;
2396
2397         if (status == ANEG_DONE &&
2398             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2399                              MR_LP_ADV_FULL_DUPLEX)))
2400                 res = 1;
2401
2402         return res;
2403 }
2404
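/* Hardware init sequence for the BCM8002 SerDes PHY: PLL lock range,
 * software reset and POR toggling.  Skipped once the driver is
 * initialized unless the PCS currently has sync.
 */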
2405 static void tg3_init_bcm8002(struct tg3 *tp)
2406 {
2407         u32 mac_status = tr32(MAC_STATUS);
2408         int i;
2409
2410         /* Reset only when initializing for the first time or when we have a link. */
2411         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2412             !(mac_status & MAC_STATUS_PCS_SYNCED))
2413                 return;
2414
2415         /* Set PLL lock range. */
2416         tg3_writephy(tp, 0x16, 0x8007);
2417
2418         /* SW reset */
2419         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2420
2421         /* Wait for reset to complete. */
2422         /* XXX schedule_timeout() ... */
2423         for (i = 0; i < 500; i++)
2424                 udelay(10);
2425
2426         /* Config mode; select PMA/Ch 1 regs. */
2427         tg3_writephy(tp, 0x10, 0x8411);
2428
2429         /* Enable auto-lock and comdet, select txclk for tx. */
2430         tg3_writephy(tp, 0x11, 0x0a10);
2431
2432         tg3_writephy(tp, 0x18, 0x00a0);
2433         tg3_writephy(tp, 0x16, 0x41ff);
2434
2435         /* Assert and deassert POR. */
2436         tg3_writephy(tp, 0x13, 0x0400);
2437         udelay(40);
2438         tg3_writephy(tp, 0x13, 0x0000);
2439
2440         tg3_writephy(tp, 0x11, 0x0a50);
2441         udelay(40);
2442         tg3_writephy(tp, 0x11, 0x0a10);
2443
2444         /* Wait for signal to stabilize */
2445         /* XXX schedule_timeout() ... */
2446         for (i = 0; i < 15000; i++)
2447                 udelay(10);
2448
2449         /* Deselect the channel register so we can read the PHYID
2450          * later.
2451          */
2452         tg3_writephy(tp, 0x10, 0x8011);
2453 }
2454
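/* Fiber link setup using the hardware SG_DIG autonegotiation block.
 * Returns 1 if the link should be considered up.
 */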
2455 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2456 {
2457         u32 sg_dig_ctrl, sg_dig_status;
2458         u32 serdes_cfg, expected_sg_dig_ctrl;
2459         int workaround, port_a;
2460         int current_link_up;
2461
2462         serdes_cfg = 0;
2463         expected_sg_dig_ctrl = 0;
2464         workaround = 0;
2465         port_a = 1;
2466         current_link_up = 0;
2467
2468         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2469             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2470                 workaround = 1;
2471                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2472                         port_a = 0;
2473
2474                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2475                 /* preserve bits 20-23 for voltage regulator */
2476                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2477         }
2478
2479         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2480
2481         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2482                 if (sg_dig_ctrl & (1 << 31)) {
2483                         if (workaround) {
2484                                 u32 val = serdes_cfg;
2485
2486                                 if (port_a)
2487                                         val |= 0xc010000;
2488                                 else
2489                                         val |= 0x4010000;
2490                                 tw32_f(MAC_SERDES_CFG, val);
2491                         }
2492                         tw32_f(SG_DIG_CTRL, 0x01388400);
2493                 }
2494                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2495                         tg3_setup_flow_control(tp, 0, 0);
2496                         current_link_up = 1;
2497                 }
2498                 goto out;
2499         }
2500
2501         /* Want auto-negotiation.  */
2502         expected_sg_dig_ctrl = 0x81388400;
2503
2504         /* Pause capability */
2505         expected_sg_dig_ctrl |= (1 << 11);
2506
2507         /* Asymmetric pause */
2508         expected_sg_dig_ctrl |= (1 << 12);
2509
2510         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2511                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2512                     tp->serdes_counter &&
2513                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2514                                     MAC_STATUS_RCVD_CFG)) ==
2515                      MAC_STATUS_PCS_SYNCED)) {
2516                         tp->serdes_counter--;
2517                         current_link_up = 1;
2518                         goto out;
2519                 }
2520 restart_autoneg:
2521                 if (workaround)
2522                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2523                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2524                 udelay(5);
2525                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2526
2527                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2528                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2529         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2530                                  MAC_STATUS_SIGNAL_DET)) {
2531                 sg_dig_status = tr32(SG_DIG_STATUS);
2532                 mac_status = tr32(MAC_STATUS);
2533
2534                 if ((sg_dig_status & (1 << 1)) &&
2535                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2536                         u32 local_adv, remote_adv;
2537
2538                         local_adv = ADVERTISE_PAUSE_CAP;
2539                         remote_adv = 0;
2540                         if (sg_dig_status & (1 << 19))
2541                                 remote_adv |= LPA_PAUSE_CAP;
2542                         if (sg_dig_status & (1 << 20))
2543                                 remote_adv |= LPA_PAUSE_ASYM;
2544
2545                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2546                         current_link_up = 1;
2547                         tp->serdes_counter = 0;
2548                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2549                 } else if (!(sg_dig_status & (1 << 1))) {
2550                         if (tp->serdes_counter)
2551                                 tp->serdes_counter--;
2552                         else {
2553                                 if (workaround) {
2554                                         u32 val = serdes_cfg;
2555
2556                                         if (port_a)
2557                                                 val |= 0xc010000;
2558                                         else
2559                                                 val |= 0x4010000;
2560
2561                                         tw32_f(MAC_SERDES_CFG, val);
2562                                 }
2563
2564                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2565                                 udelay(40);
2566
2567                                 /* Link parallel detection - link is up
2568                                  * only if we have PCS_SYNC and are not
2569                                  * receiving config code words. */
2570                                 mac_status = tr32(MAC_STATUS);
2571                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2572                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2573                                         tg3_setup_flow_control(tp, 0, 0);
2574                                         current_link_up = 1;
2575                                         tp->tg3_flags2 |=
2576                                                 TG3_FLG2_PARALLEL_DETECT;
2577                                         tp->serdes_counter =
2578                                                 SERDES_PARALLEL_DET_TIMEOUT;
2579                                 } else
2580                                         goto restart_autoneg;
2581                         }
2582                 }
2583         } else {
2584                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2585                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2586         }
2587
2588 out:
2589         return current_link_up;
2590 }
2591
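/* Fiber link setup without the hardware autoneg block: either run the
 * software autoneg state machine or force a 1000FD link, and return 1
 * if the link is up.
 */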
2592 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2593 {
2594         int current_link_up = 0;
2595
2596         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2597                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2598                 goto out;
2599         }
2600
2601         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2602                 u32 flags;
2603                 int i;
2604
2605                 if (fiber_autoneg(tp, &flags)) {
2606                         u32 local_adv, remote_adv;
2607
2608                         local_adv = ADVERTISE_PAUSE_CAP;
2609                         remote_adv = 0;
2610                         if (flags & MR_LP_ADV_SYM_PAUSE)
2611                                 remote_adv |= LPA_PAUSE_CAP;
2612                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2613                                 remote_adv |= LPA_PAUSE_ASYM;
2614
2615                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2616
2617                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2618                         current_link_up = 1;
2619                 }
2620                 for (i = 0; i < 30; i++) {
2621                         udelay(20);
2622                         tw32_f(MAC_STATUS,
2623                                (MAC_STATUS_SYNC_CHANGED |
2624                                 MAC_STATUS_CFG_CHANGED));
2625                         udelay(40);
2626                         if ((tr32(MAC_STATUS) &
2627                              (MAC_STATUS_SYNC_CHANGED |
2628                               MAC_STATUS_CFG_CHANGED)) == 0)
2629                                 break;
2630                 }
2631
2632                 mac_status = tr32(MAC_STATUS);
2633                 if (current_link_up == 0 &&
2634                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2635                     !(mac_status & MAC_STATUS_RCVD_CFG))
2636                         current_link_up = 1;
2637         } else {
2638                 /* Forcing 1000FD link up. */
2639                 current_link_up = 1;
2640                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2641
2642                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2643                 udelay(40);
2644         }
2645
2646 out:
2647         return current_link_up;
2648 }
2649
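/* Top-level link setup for TBI/fiber devices: picks hardware or
 * software autoneg, updates the MAC mode and link LEDs, and reports
 * carrier or flow-control changes.
 */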
2650 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2651 {
2652         u32 orig_pause_cfg;
2653         u16 orig_active_speed;
2654         u8 orig_active_duplex;
2655         u32 mac_status;
2656         int current_link_up;
2657         int i;
2658
2659         orig_pause_cfg =
2660                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2661                                   TG3_FLAG_TX_PAUSE));
2662         orig_active_speed = tp->link_config.active_speed;
2663         orig_active_duplex = tp->link_config.active_duplex;
2664
2665         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2666             netif_carrier_ok(tp->dev) &&
2667             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2668                 mac_status = tr32(MAC_STATUS);
2669                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2670                                MAC_STATUS_SIGNAL_DET |
2671                                MAC_STATUS_CFG_CHANGED |
2672                                MAC_STATUS_RCVD_CFG);
2673                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2674                                    MAC_STATUS_SIGNAL_DET)) {
2675                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2676                                             MAC_STATUS_CFG_CHANGED));
2677                         return 0;
2678                 }
2679         }
2680
2681         tw32_f(MAC_TX_AUTO_NEG, 0);
2682
2683         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2684         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2685         tw32_f(MAC_MODE, tp->mac_mode);
2686         udelay(40);
2687
2688         if (tp->phy_id == PHY_ID_BCM8002)
2689                 tg3_init_bcm8002(tp);
2690
2691         /* Enable link change event even when serdes polling.  */
2692         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2693         udelay(40);
2694
2695         current_link_up = 0;
2696         mac_status = tr32(MAC_STATUS);
2697
2698         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2699                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2700         else
2701                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2702
2703         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2704         tw32_f(MAC_MODE, tp->mac_mode);
2705         udelay(40);
2706
2707         tp->hw_status->status =
2708                 (SD_STATUS_UPDATED |
2709                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2710
2711         for (i = 0; i < 100; i++) {
2712                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2713                                     MAC_STATUS_CFG_CHANGED));
2714                 udelay(5);
2715                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2716                                          MAC_STATUS_CFG_CHANGED |
2717                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2718                         break;
2719         }
2720
2721         mac_status = tr32(MAC_STATUS);
2722         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2723                 current_link_up = 0;
2724                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2725                     tp->serdes_counter == 0) {
2726                         tw32_f(MAC_MODE, (tp->mac_mode |
2727                                           MAC_MODE_SEND_CONFIGS));
2728                         udelay(1);
2729                         tw32_f(MAC_MODE, tp->mac_mode);
2730                 }
2731         }
2732
2733         if (current_link_up == 1) {
2734                 tp->link_config.active_speed = SPEED_1000;
2735                 tp->link_config.active_duplex = DUPLEX_FULL;
2736                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2737                                     LED_CTRL_LNKLED_OVERRIDE |
2738                                     LED_CTRL_1000MBPS_ON));
2739         } else {
2740                 tp->link_config.active_speed = SPEED_INVALID;
2741                 tp->link_config.active_duplex = DUPLEX_INVALID;
2742                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2743                                     LED_CTRL_LNKLED_OVERRIDE |
2744                                     LED_CTRL_TRAFFIC_OVERRIDE));
2745         }
2746
2747         if (current_link_up != netif_carrier_ok(tp->dev)) {
2748                 if (current_link_up)
2749                         netif_carrier_on(tp->dev);
2750                 else
2751                         netif_carrier_off(tp->dev);
2752                 tg3_link_report(tp);
2753         } else {
2754                 u32 now_pause_cfg =
2755                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2756                                          TG3_FLAG_TX_PAUSE);
2757                 if (orig_pause_cfg != now_pause_cfg ||
2758                     orig_active_speed != tp->link_config.active_speed ||
2759                     orig_active_duplex != tp->link_config.active_duplex)
2760                         tg3_link_report(tp);
2761         }
2762
2763         return 0;
2764 }
2765
2766 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2767 {
2768         int current_link_up, err = 0;
2769         u32 bmsr, bmcr;
2770         u16 current_speed;
2771         u8 current_duplex;
2772
2773         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2774         tw32_f(MAC_MODE, tp->mac_mode);
2775         udelay(40);
2776
2777         tw32(MAC_EVENT, 0);
2778
2779         tw32_f(MAC_STATUS,
2780              (MAC_STATUS_SYNC_CHANGED |
2781               MAC_STATUS_CFG_CHANGED |
2782               MAC_STATUS_MI_COMPLETION |
2783               MAC_STATUS_LNKSTATE_CHANGED));
2784         udelay(40);
2785
2786         if (force_reset)
2787                 tg3_phy_reset(tp);
2788
2789         current_link_up = 0;
2790         current_speed = SPEED_INVALID;
2791         current_duplex = DUPLEX_INVALID;
2792
2793         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2794         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2795         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2796                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2797                         bmsr |= BMSR_LSTATUS;
2798                 else
2799                         bmsr &= ~BMSR_LSTATUS;
2800         }
2801
2802         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2803
2804         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2805             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2806                 /* do nothing, just check for link up at the end */
2807         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2808                 u32 adv, new_adv;
2809
2810                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2811                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2812                                   ADVERTISE_1000XPAUSE |
2813                                   ADVERTISE_1000XPSE_ASYM |
2814                                   ADVERTISE_SLCT);
2815
2816                 /* Always advertise symmetric PAUSE just like copper */
2817                 new_adv |= ADVERTISE_1000XPAUSE;
2818
2819                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2820                         new_adv |= ADVERTISE_1000XHALF;
2821                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2822                         new_adv |= ADVERTISE_1000XFULL;
2823
2824                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2825                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2826                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2827                         tg3_writephy(tp, MII_BMCR, bmcr);
2828
2829                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2830                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2831                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2832
2833                         return err;
2834                 }
2835         } else {
2836                 u32 new_bmcr;
2837
2838                 bmcr &= ~BMCR_SPEED1000;
2839                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2840
2841                 if (tp->link_config.duplex == DUPLEX_FULL)
2842                         new_bmcr |= BMCR_FULLDPLX;
2843
2844                 if (new_bmcr != bmcr) {
2845                         /* BMCR_SPEED1000 is a reserved bit that needs
2846                          * to be set on write.
2847                          */
2848                         new_bmcr |= BMCR_SPEED1000;
2849
2850                         /* Force a linkdown */
2851                         if (netif_carrier_ok(tp->dev)) {
2852                                 u32 adv;
2853
2854                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2855                                 adv &= ~(ADVERTISE_1000XFULL |
2856                                          ADVERTISE_1000XHALF |
2857                                          ADVERTISE_SLCT);
2858                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2859                                 tg3_writephy(tp, MII_BMCR, bmcr |
2860                                                            BMCR_ANRESTART |
2861                                                            BMCR_ANENABLE);
2862                                 udelay(10);
2863                                 netif_carrier_off(tp->dev);
2864                         }
2865                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2866                         bmcr = new_bmcr;
2867                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2868                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2869                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2870                             ASIC_REV_5714) {
2871                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2872                                         bmsr |= BMSR_LSTATUS;
2873                                 else
2874                                         bmsr &= ~BMSR_LSTATUS;
2875                         }
2876                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2877                 }
2878         }
2879
2880         if (bmsr & BMSR_LSTATUS) {
2881                 current_speed = SPEED_1000;
2882                 current_link_up = 1;
2883                 if (bmcr & BMCR_FULLDPLX)
2884                         current_duplex = DUPLEX_FULL;
2885                 else
2886                         current_duplex = DUPLEX_HALF;
2887
2888                 if (bmcr & BMCR_ANENABLE) {
2889                         u32 local_adv, remote_adv, common;
2890
2891                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2892                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2893                         common = local_adv & remote_adv;
2894                         if (common & (ADVERTISE_1000XHALF |
2895                                       ADVERTISE_1000XFULL)) {
2896                                 if (common & ADVERTISE_1000XFULL)
2897                                         current_duplex = DUPLEX_FULL;
2898                                 else
2899                                         current_duplex = DUPLEX_HALF;
2900
2901                                 tg3_setup_flow_control(tp, local_adv,
2902                                                        remote_adv);
2903                         }
2904                         else
2905                                 current_link_up = 0;
2906                 }
2907         }
2908
2909         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2910         if (tp->link_config.active_duplex == DUPLEX_HALF)
2911                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2912
2913         tw32_f(MAC_MODE, tp->mac_mode);
2914         udelay(40);
2915
2916         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2917
2918         tp->link_config.active_speed = current_speed;
2919         tp->link_config.active_duplex = current_duplex;
2920
2921         if (current_link_up != netif_carrier_ok(tp->dev)) {
2922                 if (current_link_up)
2923                         netif_carrier_on(tp->dev);
2924                 else {
2925                         netif_carrier_off(tp->dev);
2926                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2927                 }
2928                 tg3_link_report(tp);
2929         }
2930         return err;
2931 }
2932
2933 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2934 {
2935         if (tp->serdes_counter) {
2936                 /* Give autoneg time to complete. */
2937                 tp->serdes_counter--;
2938                 return;
2939         }
2940         if (!netif_carrier_ok(tp->dev) &&
2941             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2942                 u32 bmcr;
2943
2944                 tg3_readphy(tp, MII_BMCR, &bmcr);
2945                 if (bmcr & BMCR_ANENABLE) {
2946                         u32 phy1, phy2;
2947
2948                         /* Select shadow register 0x1f */
2949                         tg3_writephy(tp, 0x1c, 0x7c00);
2950                         tg3_readphy(tp, 0x1c, &phy1);
2951
2952                         /* Select expansion interrupt status register */
2953                         tg3_writephy(tp, 0x17, 0x0f01);
2954                         tg3_readphy(tp, 0x15, &phy2);
2955                         tg3_readphy(tp, 0x15, &phy2);
2956
2957                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2958                                 /* We have signal detect and not receiving
2959                                  * config code words, link is up by parallel
2960                                  * detection.
2961                                  */
2962
2963                                 bmcr &= ~BMCR_ANENABLE;
2964                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2965                                 tg3_writephy(tp, MII_BMCR, bmcr);
2966                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2967                         }
2968                 }
2969         }
2970         else if (netif_carrier_ok(tp->dev) &&
2971                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2972                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2973                 u32 phy2;
2974
2975                 /* Select expansion interrupt status register */
2976                 tg3_writephy(tp, 0x17, 0x0f01);
2977                 tg3_readphy(tp, 0x15, &phy2);
2978                 if (phy2 & 0x20) {
2979                         u32 bmcr;
2980
2981                         /* Config code words received, turn on autoneg. */
2982                         tg3_readphy(tp, MII_BMCR, &bmcr);
2983                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2984
2985                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2986
2987                 }
2988         }
2989 }
2990
2991 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2992 {
2993         int err;
2994
2995         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2996                 err = tg3_setup_fiber_phy(tp, force_reset);
2997         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2998                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2999         } else {
3000                 err = tg3_setup_copper_phy(tp, force_reset);
3001         }
3002
3003         if (tp->link_config.active_speed == SPEED_1000 &&
3004             tp->link_config.active_duplex == DUPLEX_HALF)
3005                 tw32(MAC_TX_LENGTHS,
3006                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3007                       (6 << TX_LENGTHS_IPG_SHIFT) |
3008                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3009         else
3010                 tw32(MAC_TX_LENGTHS,
3011                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3012                       (6 << TX_LENGTHS_IPG_SHIFT) |
3013                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3014
3015         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3016                 if (netif_carrier_ok(tp->dev)) {
3017                         tw32(HOSTCC_STAT_COAL_TICKS,
3018                              tp->coal.stats_block_coalesce_usecs);
3019                 } else {
3020                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3021                 }
3022         }
3023
3024         return err;
3025 }
3026
3027 /* This is called whenever we suspect that the system chipset is re-
3028  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3029  * is bogus tx completions. We try to recover by setting the
3030  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3031  * in the workqueue.
3032  */
3033 static void tg3_tx_recover(struct tg3 *tp)
3034 {
3035         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3036                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3037
3038         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3039                "mapped I/O cycles to the network device, attempting to "
3040                "recover. Please report the problem to the driver maintainer "
3041                "and include system chipset information.\n", tp->dev->name);
3042
3043         spin_lock(&tp->lock);
3044         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3045         spin_unlock(&tp->lock);
3046 }
3047
3048 static inline u32 tg3_tx_avail(struct tg3 *tp)
3049 {
3050         smp_mb();
3051         return (tp->tx_pending -
3052                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3053 }
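
     /* Illustration only (not part of the original driver): with a
      * power-of-two ring, (tx_prod - tx_cons) & (TG3_TX_RING_SIZE - 1)
      * counts in-flight descriptors even across index wrap-around.
      * Assuming TG3_TX_RING_SIZE is 512 and tx_pending is 511:
      *
      *   tx_prod = 10, tx_cons = 5    ->  (10 - 5)  & 511 = 5, avail = 506
      *   tx_prod = 3,  tx_cons = 509  ->  (3 - 509) & 511 = 6, avail = 505
      *
      * so callers can compare the result directly against the number of
      * descriptors a new skb would consume.
      */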
3054
3055 /* Tigon3 never reports partial packet sends.  So we do not
3056  * need special logic to handle SKBs that have not had all
3057  * of their frags sent yet, like SunGEM does.
3058  */
3059 static void tg3_tx(struct tg3 *tp)
3060 {
3061         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3062         u32 sw_idx = tp->tx_cons;
3063
3064         while (sw_idx != hw_idx) {
3065                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3066                 struct sk_buff *skb = ri->skb;
3067                 int i, tx_bug = 0;
3068
3069                 if (unlikely(skb == NULL)) {
3070                         tg3_tx_recover(tp);
3071                         return;
3072                 }
3073
3074                 pci_unmap_single(tp->pdev,
3075                                  pci_unmap_addr(ri, mapping),
3076                                  skb_headlen(skb),
3077                                  PCI_DMA_TODEVICE);
3078
3079                 ri->skb = NULL;
3080
3081                 sw_idx = NEXT_TX(sw_idx);
3082
3083                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3084                         ri = &tp->tx_buffers[sw_idx];
3085                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3086                                 tx_bug = 1;
3087
3088                         pci_unmap_page(tp->pdev,
3089                                        pci_unmap_addr(ri, mapping),
3090                                        skb_shinfo(skb)->frags[i].size,
3091                                        PCI_DMA_TODEVICE);
3092
3093                         sw_idx = NEXT_TX(sw_idx);
3094                 }
3095
3096                 dev_kfree_skb(skb);
3097
3098                 if (unlikely(tx_bug)) {
3099                         tg3_tx_recover(tp);
3100                         return;
3101                 }
3102         }
3103
3104         tp->tx_cons = sw_idx;
3105
3106         /* Need to make the tx_cons update visible to tg3_start_xmit()
3107          * before checking for netif_queue_stopped().  Without the
3108          * memory barrier, there is a small possibility that tg3_start_xmit()
3109          * will miss it and cause the queue to be stopped forever.
3110          */
3111         smp_mb();
3112
3113         if (unlikely(netif_queue_stopped(tp->dev) &&
3114                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3115                 netif_tx_lock(tp->dev);
3116                 if (netif_queue_stopped(tp->dev) &&
3117                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3118                         netif_wake_queue(tp->dev);
3119                 netif_tx_unlock(tp->dev);
3120         }
3121 }
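
     /* Rough sketch (illustration only) of the ordering the smp_mb() in
      * tg3_tx() and tg3_tx_avail() provide between the two sides:
      *
      *   tg3_start_xmit()                 tg3_tx()
      *   ----------------                 --------
      *   netif_stop_queue(dev)            tp->tx_cons = sw_idx
      *   smp_mb() in tg3_tx_avail()       smp_mb()
      *   re-check tg3_tx_avail()          re-check netif_queue_stopped()
      *
      * At least one side is guaranteed to observe the other's update, so
      * the queue cannot stay stopped while descriptors are free.
      */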
3122
3123 /* Returns size of skb allocated or < 0 on error.
3124  *
3125  * We only need to fill in the address because the other members
3126  * of the RX descriptor are invariant, see tg3_init_rings.
3127  *
3128  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3129  * posting buffers we only dirty the first cache line of the RX
3130  * descriptor (containing the address).  Whereas for the RX status
3131  * buffers the cpu only reads the last cache line of the RX descriptor
3132  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3133  */
3134 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3135                             int src_idx, u32 dest_idx_unmasked)
3136 {
3137         struct tg3_rx_buffer_desc *desc;
3138         struct ring_info *map, *src_map;
3139         struct sk_buff *skb;
3140         dma_addr_t mapping;
3141         int skb_size, dest_idx;
3142
3143         src_map = NULL;
3144         switch (opaque_key) {
3145         case RXD_OPAQUE_RING_STD:
3146                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3147                 desc = &tp->rx_std[dest_idx];
3148                 map = &tp->rx_std_buffers[dest_idx];
3149                 if (src_idx >= 0)
3150                         src_map = &tp->rx_std_buffers[src_idx];
3151                 skb_size = tp->rx_pkt_buf_sz;
3152                 break;
3153
3154         case RXD_OPAQUE_RING_JUMBO:
3155                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3156                 desc = &tp->rx_jumbo[dest_idx];
3157                 map = &tp->rx_jumbo_buffers[dest_idx];
3158                 if (src_idx >= 0)
3159                         src_map = &tp->rx_jumbo_buffers[src_idx];
3160                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3161                 break;
3162
3163         default:
3164                 return -EINVAL;
3165         }
3166
3167         /* Do not overwrite any of the map or rp information
3168          * until we are sure we can commit to a new buffer.
3169          *
3170          * Callers depend upon this behavior and assume that
3171          * we leave everything unchanged if we fail.
3172          */
3173         skb = netdev_alloc_skb(tp->dev, skb_size);
3174         if (skb == NULL)
3175                 return -ENOMEM;
3176
3177         skb_reserve(skb, tp->rx_offset);
3178
3179         mapping = pci_map_single(tp->pdev, skb->data,
3180                                  skb_size - tp->rx_offset,
3181                                  PCI_DMA_FROMDEVICE);
3182
3183         map->skb = skb;
3184         pci_unmap_addr_set(map, mapping, mapping);
3185
3186         if (src_map != NULL)
3187                 src_map->skb = NULL;
3188
3189         desc->addr_hi = ((u64)mapping >> 32);
3190         desc->addr_lo = ((u64)mapping & 0xffffffff);
3191
3192         return skb_size;
3193 }
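
     /* Illustration only: the DMA address is split across the two 32-bit
      * descriptor words just as in tg3_set_txd().  For example, assuming a
      * 64-bit mapping of 0x0000001234abcd00:
      *
      *   desc->addr_hi = 0x00000012;
      *   desc->addr_lo = 0x34abcd00;
      *
      * On platforms with a 32-bit dma_addr_t the upper word is simply zero.
      */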
3194
3195 /* We only need to move over in the address because the other
3196  * members of the RX descriptor are invariant.  See notes above
3197  * tg3_alloc_rx_skb for full details.
3198  */
3199 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3200                            int src_idx, u32 dest_idx_unmasked)
3201 {
3202         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3203         struct ring_info *src_map, *dest_map;
3204         int dest_idx;
3205
3206         switch (opaque_key) {
3207         case RXD_OPAQUE_RING_STD:
3208                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3209                 dest_desc = &tp->rx_std[dest_idx];
3210                 dest_map = &tp->rx_std_buffers[dest_idx];
3211                 src_desc = &tp->rx_std[src_idx];
3212                 src_map = &tp->rx_std_buffers[src_idx];
3213                 break;
3214
3215         case RXD_OPAQUE_RING_JUMBO:
3216                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3217                 dest_desc = &tp->rx_jumbo[dest_idx];
3218                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3219                 src_desc = &tp->rx_jumbo[src_idx];
3220                 src_map = &tp->rx_jumbo_buffers[src_idx];
3221                 break;
3222
3223         default:
3224                 return;
3225         }
3226
3227         dest_map->skb = src_map->skb;
3228         pci_unmap_addr_set(dest_map, mapping,
3229                            pci_unmap_addr(src_map, mapping));
3230         dest_desc->addr_hi = src_desc->addr_hi;
3231         dest_desc->addr_lo = src_desc->addr_lo;
3232
3233         src_map->skb = NULL;
3234 }
3235
3236 #if TG3_VLAN_TAG_USED
3237 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3238 {
3239         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3240 }
3241 #endif
3242
3243 /* The RX ring scheme is composed of multiple rings which post fresh
3244  * buffers to the chip, and one special ring the chip uses to report
3245  * status back to the host.
3246  *
3247  * The special ring reports the status of received packets to the
3248  * host.  The chip does not write into the original descriptor the
3249  * RX buffer was obtained from.  The chip simply takes the original
3250  * descriptor as provided by the host, updates the status and length
3251  * field, then writes this into the next status ring entry.
3252  *
3253  * Each ring the host uses to post buffers to the chip is described
3254  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3255  * it is first placed into the on-chip ram.  When the packet's length
3256  * is known, it walks down the TG3_BDINFO entries to select the ring.
3257  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3258  * whose MAXLEN covers the new packet's length is chosen.
3259  *
3260  * The "separate ring for rx status" scheme may sound queer, but it makes
3261  * sense from a cache coherency perspective.  If only the host writes
3262  * to the buffer post rings, and only the chip writes to the rx status
3263  * rings, then cache lines never move beyond shared-modified state.
3264  * If both the host and chip were to write into the same ring, cache line
3265  * eviction could occur since both entities want it in an exclusive state.
3266  */
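     /* Illustration only: the host encodes the ring type and ring index in
      * the descriptor's opaque cookie when posting a buffer (in
      * tg3_init_rings(), roughly RXD_OPAQUE_RING_STD | index), and tg3_rx()
      * below decodes the same cookie from the status ring entry:
      *
      *   desc_idx   = desc->opaque & RXD_OPAQUE_INDEX_MASK;
      *   opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
      *
      * which is how a completion is matched back to rx_std_buffers[] or
      * rx_jumbo_buffers[] without the chip ever writing the posting rings.
      */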
3267 static int tg3_rx(struct tg3 *tp, int budget)
3268 {
3269         u32 work_mask, rx_std_posted = 0;
3270         u32 sw_idx = tp->rx_rcb_ptr;
3271         u16 hw_idx;
3272         int received;
3273
3274         hw_idx = tp->hw_status->idx[0].rx_producer;
3275         /*
3276          * We need to order the read of hw_idx and the read of
3277          * the opaque cookie.
3278          */
3279         rmb();
3280         work_mask = 0;
3281         received = 0;
3282         while (sw_idx != hw_idx && budget > 0) {
3283                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3284                 unsigned int len;
3285                 struct sk_buff *skb;
3286                 dma_addr_t dma_addr;
3287                 u32 opaque_key, desc_idx, *post_ptr;
3288
3289                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3290                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3291                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3292                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3293                                                   mapping);
3294                         skb = tp->rx_std_buffers[desc_idx].skb;
3295                         post_ptr = &tp->rx_std_ptr;
3296                         rx_std_posted++;
3297                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3298                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3299                                                   mapping);
3300                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3301                         post_ptr = &tp->rx_jumbo_ptr;
3302                 }
3303                 else {
3304                         goto next_pkt_nopost;
3305                 }
3306
3307                 work_mask |= opaque_key;
3308
3309                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3310                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3311                 drop_it:
3312                         tg3_recycle_rx(tp, opaque_key,
3313                                        desc_idx, *post_ptr);
3314                 drop_it_no_recycle:
3315                         /* Other statistics are tracked by the card. */
3316                         tp->net_stats.rx_dropped++;
3317                         goto next_pkt;
3318                 }
3319
3320                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3321
3322                 if (len > RX_COPY_THRESHOLD
3323                         && tp->rx_offset == 2
3324                         /* rx_offset != 2 iff this is a 5701 card running
3325                          * in PCI-X mode [see tg3_get_invariants()] */
3326                 ) {
3327                         int skb_size;
3328
3329                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3330                                                     desc_idx, *post_ptr);
3331                         if (skb_size < 0)
3332                                 goto drop_it;
3333
3334                         pci_unmap_single(tp->pdev, dma_addr,
3335                                          skb_size - tp->rx_offset,
3336                                          PCI_DMA_FROMDEVICE);
3337
3338                         skb_put(skb, len);
3339                 } else {
3340                         struct sk_buff *copy_skb;
3341
3342                         tg3_recycle_rx(tp, opaque_key,
3343                                        desc_idx, *post_ptr);
3344
3345                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3346                         if (copy_skb == NULL)
3347                                 goto drop_it_no_recycle;
3348
3349                         skb_reserve(copy_skb, 2);
3350                         skb_put(copy_skb, len);
3351                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3352                         memcpy(copy_skb->data, skb->data, len);
3353                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3354
3355                         /* We'll reuse the original ring buffer. */
3356                         skb = copy_skb;
3357                 }
3358
3359                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3360                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3361                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3362                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3363                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3364                 else
3365                         skb->ip_summed = CHECKSUM_NONE;
3366
3367                 skb->protocol = eth_type_trans(skb, tp->dev);
3368 #if TG3_VLAN_TAG_USED
3369                 if (tp->vlgrp != NULL &&
3370                     desc->type_flags & RXD_FLAG_VLAN) {
3371                         tg3_vlan_rx(tp, skb,
3372                                     desc->err_vlan & RXD_VLAN_MASK);
3373                 } else
3374 #endif
3375                         netif_receive_skb(skb);
3376
3377                 tp->dev->last_rx = jiffies;
3378                 received++;
3379                 budget--;
3380
3381 next_pkt:
3382                 (*post_ptr)++;
3383
3384                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3385                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3386
3387                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3388                                      TG3_64BIT_REG_LOW, idx);
3389                         work_mask &= ~RXD_OPAQUE_RING_STD;
3390                         rx_std_posted = 0;
3391                 }
3392 next_pkt_nopost:
3393                 sw_idx++;
3394                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3395
3396                 /* Refresh hw_idx to see if there is new work */
3397                 if (sw_idx == hw_idx) {
3398                         hw_idx = tp->hw_status->idx[0].rx_producer;
3399                         rmb();
3400                 }
3401         }
3402
3403         /* ACK the status ring. */
3404         tp->rx_rcb_ptr = sw_idx;
3405         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3406
3407         /* Refill RX ring(s). */
3408         if (work_mask & RXD_OPAQUE_RING_STD) {
3409                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3410                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3411                              sw_idx);
3412         }
3413         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3414                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3415                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3416                              sw_idx);
3417         }
3418         mmiowb();
3419
3420         return received;
3421 }
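
     /* Illustration only: the status-ring index above wraps with a mask,
      * while the producer indices written back to the mailboxes wrap via a
      * modulo.  Assuming TG3_RX_RING_SIZE is 512, rx_std_ptr = 515 results
      * in 515 % 512 = 3 being written to MAILBOX_RCV_STD_PROD_IDX.  For the
      * power-of-two ring sizes used here the two forms are equivalent.
      */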
3422
3423 static int tg3_poll(struct net_device *netdev, int *budget)
3424 {
3425         struct tg3 *tp = netdev_priv(netdev);
3426         struct tg3_hw_status *sblk = tp->hw_status;
3427         int done;
3428
3429         /* handle link change and other phy events */
3430         if (!(tp->tg3_flags &
3431               (TG3_FLAG_USE_LINKCHG_REG |
3432                TG3_FLAG_POLL_SERDES))) {
3433                 if (sblk->status & SD_STATUS_LINK_CHG) {
3434                         sblk->status = SD_STATUS_UPDATED |
3435                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3436                         spin_lock(&tp->lock);
3437                         tg3_setup_phy(tp, 0);
3438                         spin_unlock(&tp->lock);
3439                 }
3440         }
3441
3442         /* run TX completion thread */
3443         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3444                 tg3_tx(tp);
3445                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3446                         netif_rx_complete(netdev);
3447                         schedule_work(&tp->reset_task);
3448                         return 0;
3449                 }
3450         }
3451
3452         /* run RX thread, within the bounds set by NAPI.
3453          * All RX "locking" is done by ensuring outside
3454          * code synchronizes with dev->poll()
3455          */
3456         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3457                 int orig_budget = *budget;
3458                 int work_done;
3459
3460                 if (orig_budget > netdev->quota)
3461                         orig_budget = netdev->quota;
3462
3463                 work_done = tg3_rx(tp, orig_budget);
3464
3465                 *budget -= work_done;
3466                 netdev->quota -= work_done;
3467         }
3468
3469         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3470                 tp->last_tag = sblk->status_tag;
3471                 rmb();
3472         } else
3473                 sblk->status &= ~SD_STATUS_UPDATED;
3474
3475         /* if no more work, tell net stack and NIC we're done */
3476         done = !tg3_has_work(tp);
3477         if (done) {
3478                 netif_rx_complete(netdev);
3479                 tg3_restart_ints(tp);
3480         }
3481
3482         return (done ? 0 : 1);
3483 }
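
     /* Illustration only, for the older netdev->poll() NAPI API used above:
      * *budget is the overall quota for this softirq pass and netdev->quota
      * is the per-device share.  For example, *budget = 64 and quota = 32
      * clamp orig_budget to 32; if tg3_rx() then reports work_done = 20,
      * both counters drop by 20.  Returning 1 asks the core to poll again,
      * while returning 0 after netif_rx_complete() removes the device from
      * the poll list until the next interrupt.
      */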
3484
3485 static void tg3_irq_quiesce(struct tg3 *tp)
3486 {
3487         BUG_ON(tp->irq_sync);
3488
3489         tp->irq_sync = 1;
3490         smp_mb();
3491
3492         synchronize_irq(tp->pdev->irq);
3493 }
3494
3495 static inline int tg3_irq_sync(struct tg3 *tp)
3496 {
3497         return tp->irq_sync;
3498 }
3499
3500 /* Fully shut down all tg3 driver activity elsewhere in the system.
3501  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3502  * with as well.  Most of the time, this is not necessary except when
3503  * shutting down the device.
3504  */
3505 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3506 {
3507         if (irq_sync)
3508                 tg3_irq_quiesce(tp);
3509         spin_lock_bh(&tp->lock);
3510 }
3511
3512 static inline void tg3_full_unlock(struct tg3 *tp)
3513 {
3514         spin_unlock_bh(&tp->lock);
3515 }
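
     /* Illustration only: the usual calling pattern, as in tg3_reset_task()
      * below, is
      *
      *   tg3_full_lock(tp, 1);    quiesce the IRQ handler, take tp->lock
      *   ... halt / reconfigure / re-init the hardware ...
      *   tg3_full_unlock(tp);
      *
      * with irq_sync = 0 when only mutual exclusion against other lock
      * holders is needed and the IRQ handler may keep running.
      */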
3516
3517 /* One-shot MSI handler - Chip automatically disables interrupt
3518  * after sending MSI so driver doesn't have to do it.
3519  */
3520 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3521 {
3522         struct net_device *dev = dev_id;
3523         struct tg3 *tp = netdev_priv(dev);
3524
3525         prefetch(tp->hw_status);
3526         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3527
3528         if (likely(!tg3_irq_sync(tp)))
3529                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3530
3531         return IRQ_HANDLED;
3532 }
3533
3534 /* MSI ISR - No need to check for interrupt sharing and no need to
3535  * flush status block and interrupt mailbox. PCI ordering rules
3536  * guarantee that MSI will arrive after the status block.
3537  */
3538 static irqreturn_t tg3_msi(int irq, void *dev_id)
3539 {
3540         struct net_device *dev = dev_id;
3541         struct tg3 *tp = netdev_priv(dev);
3542
3543         prefetch(tp->hw_status);
3544         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3545         /*
3546          * Writing any value to intr-mbox-0 clears PCI INTA# and
3547          * chip-internal interrupt pending events.
3548          * Writing non-zero to intr-mbox-0 additionally tells the
3549          * NIC to stop sending us irqs, engaging "in-intr-handler"
3550          * event coalescing.
3551          */
3552         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3553         if (likely(!tg3_irq_sync(tp)))
3554                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3555
3556         return IRQ_RETVAL(1);
3557 }
3558
3559 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3560 {
3561         struct net_device *dev = dev_id;
3562         struct tg3 *tp = netdev_priv(dev);
3563         struct tg3_hw_status *sblk = tp->hw_status;
3564         unsigned int handled = 1;
3565
3566         /* In INTx mode, it is possible for the interrupt to arrive at
3567          * the CPU before the status block that was posted prior to the interrupt.
3568          * Reading the PCI State register will confirm whether the
3569          * interrupt is ours and will flush the status block.
3570          */
3571         if ((sblk->status & SD_STATUS_UPDATED) ||
3572             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3573                 /*
3574                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3575                  * chip-internal interrupt pending events.
3576                  * Writing non-zero to intr-mbox-0 additionally tells the
3577                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3578                  * event coalescing.
3579                  */
3580                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3581                              0x00000001);
3582                 if (tg3_irq_sync(tp))
3583                         goto out;
3584                 sblk->status &= ~SD_STATUS_UPDATED;
3585                 if (likely(tg3_has_work(tp))) {
3586                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3587                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3588                 } else {
3589                         /* No work, shared interrupt perhaps?  re-enable
3590                          * interrupts, and flush that PCI write
3591                          */
3592                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3593                                 0x00000000);
3594                 }
3595         } else {        /* shared interrupt */
3596                 handled = 0;
3597         }
3598 out:
3599         return IRQ_RETVAL(handled);
3600 }
3601
3602 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
3603 {
3604         struct net_device *dev = dev_id;
3605         struct tg3 *tp = netdev_priv(dev);
3606         struct tg3_hw_status *sblk = tp->hw_status;
3607         unsigned int handled = 1;
3608
3609         /* In INTx mode, it is possible for the interrupt to arrive at
3610          * the CPU before the status block that was posted prior to the interrupt.
3611          * Reading the PCI State register will confirm whether the
3612          * interrupt is ours and will flush the status block.
3613          */
3614         if ((sblk->status_tag != tp->last_tag) ||
3615             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3616                 /*
3617                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3618                  * chip-internal interrupt pending events.
3619                  * Writing non-zero to intr-mbox-0 additionally tells the
3620                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3621                  * event coalescing.
3622                  */
3623                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3624                              0x00000001);
3625                 if (tg3_irq_sync(tp))
3626                         goto out;
3627                 if (netif_rx_schedule_prep(dev)) {
3628                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3629                         /* Update last_tag to mark that this status has been
3630                          * seen. Because interrupt may be shared, we may be
3631                          * racing with tg3_poll(), so only update last_tag
3632                          * if tg3_poll() is not scheduled.
3633                          */
3634                         tp->last_tag = sblk->status_tag;
3635                         __netif_rx_schedule(dev);
3636                 }
3637         } else {        /* shared interrupt */
3638                 handled = 0;
3639         }
3640 out:
3641         return IRQ_RETVAL(handled);
3642 }
3643
3644 /* ISR for interrupt test */
3645 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3646 {
3647         struct net_device *dev = dev_id;
3648         struct tg3 *tp = netdev_priv(dev);
3649         struct tg3_hw_status *sblk = tp->hw_status;
3650
3651         if ((sblk->status & SD_STATUS_UPDATED) ||
3652             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3653                 tg3_disable_ints(tp);
3654                 return IRQ_RETVAL(1);
3655         }
3656         return IRQ_RETVAL(0);
3657 }
3658
3659 static int tg3_init_hw(struct tg3 *, int);
3660 static int tg3_halt(struct tg3 *, int, int);
3661
3662 /* Restart hardware after configuration changes, self-test, etc.
3663  * Invoked with tp->lock held.
3664  */
3665 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3666 {
3667         int err;
3668
3669         err = tg3_init_hw(tp, reset_phy);
3670         if (err) {
3671                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3672                        "aborting.\n", tp->dev->name);
3673                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3674                 tg3_full_unlock(tp);
3675                 del_timer_sync(&tp->timer);
3676                 tp->irq_sync = 0;
3677                 netif_poll_enable(tp->dev);
3678                 dev_close(tp->dev);
3679                 tg3_full_lock(tp, 0);
3680         }
3681         return err;
3682 }
3683
3684 #ifdef CONFIG_NET_POLL_CONTROLLER
3685 static void tg3_poll_controller(struct net_device *dev)
3686 {
3687         struct tg3 *tp = netdev_priv(dev);
3688
3689         tg3_interrupt(tp->pdev->irq, dev);
3690 }
3691 #endif
3692
3693 static void tg3_reset_task(struct work_struct *work)
3694 {
3695         struct tg3 *tp = container_of(work, struct tg3, reset_task);
3696         unsigned int restart_timer;
3697
3698         tg3_full_lock(tp, 0);
3699         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3700
3701         if (!netif_running(tp->dev)) {
3702                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3703                 tg3_full_unlock(tp);
3704                 return;
3705         }
3706
3707         tg3_full_unlock(tp);
3708
3709         tg3_netif_stop(tp);
3710
3711         tg3_full_lock(tp, 1);
3712
3713         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3714         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3715
3716         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3717                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3718                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3719                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3720                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3721         }
3722
3723         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3724         if (tg3_init_hw(tp, 1))
3725                 goto out;
3726
3727         tg3_netif_start(tp);
3728
3729         if (restart_timer)
3730                 mod_timer(&tp->timer, jiffies + 1);
3731
3732 out:
3733         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3734
3735         tg3_full_unlock(tp);
3736 }
3737
3738 static void tg3_tx_timeout(struct net_device *dev)
3739 {
3740         struct tg3 *tp = netdev_priv(dev);
3741
3742         if (netif_msg_tx_err(tp))
3743                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3744                        dev->name);
3745
3746         schedule_work(&tp->reset_task);
3747 }
3748
3749 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3750 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3751 {
3752         u32 base = (u32) mapping & 0xffffffff;
3753
3754         return ((base > 0xffffdcc0) &&
3755                 (base + len + 8 < base));
3756 }
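
     /* Illustration only: the test relies on 32-bit wrap-around.  Assuming
      * base = 0xfffff000 and len = 0x1800:
      *
      *   base > 0xffffdcc0                  -> true
      *   (u32)(base + len + 8) = 0x00000808 -> less than base, so the
      *                                         buffer straddles a 4GB line
      *
      * The 0xffffdcc0 cutoff leaves roughly 9KB of headroom below the
      * boundary, so buffers that cannot possibly cross it are ruled out
      * with a single cheap compare.
      */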
3757
3758 /* Test for DMA addresses > 40-bit */
3759 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3760                                           int len)
3761 {
3762 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3763         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3764                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3765         return 0;
3766 #else
3767         return 0;
3768 #endif
3769 }
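
     /* Illustration only: DMA_40BIT_MASK is the 40-bit all-ones value
      * 0x000000ffffffffffULL, so with TG3_FLAG_40BIT_DMA_BUG set a mapping
      * of 0x000000fffffff000 with len = 0x2000 gives mapping + len =
      * 0x0000010000001000, which exceeds the mask and returns 1, steering
      * tg3_start_xmit_dma_bug() into tigon3_dma_hwbug_workaround().
      */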
3770
3771 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3772
3773 /* Work around 4GB and 40-bit hardware DMA bugs. */
3774 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3775                                        u32 last_plus_one, u32 *start,
3776                                        u32 base_flags, u32 mss)
3777 {
3778         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3779         dma_addr_t new_addr = 0;
3780         u32 entry = *start;
3781         int i, ret = 0;
3782
3783         if (!new_skb) {
3784                 ret = -1;
3785         } else {
3786                 /* New SKB is guaranteed to be linear. */
3787                 entry = *start;
3788                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3789                                           PCI_DMA_TODEVICE);
3790                 /* Make sure new skb does not cross any 4G boundaries.
3791                  * Drop the packet if it does.
3792                  */
3793                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3794                         ret = -1;
3795                         dev_kfree_skb(new_skb);
3796                         new_skb = NULL;
3797                 } else {
3798                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3799                                     base_flags, 1 | (mss << 1));
3800                         *start = NEXT_TX(entry);
3801                 }
3802         }
3803
3804         /* Now clean up the sw ring entries. */
3805         i = 0;
3806         while (entry != last_plus_one) {
3807                 int len;
3808
3809                 if (i == 0)
3810                         len = skb_headlen(skb);
3811                 else
3812                         len = skb_shinfo(skb)->frags[i-1].size;
3813                 pci_unmap_single(tp->pdev,
3814                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3815                                  len, PCI_DMA_TODEVICE);
3816                 if (i == 0) {
3817                         tp->tx_buffers[entry].skb = new_skb;
3818                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3819                 } else {
3820                         tp->tx_buffers[entry].skb = NULL;
3821                 }
3822                 entry = NEXT_TX(entry);
3823                 i++;
3824         }
3825
3826         dev_kfree_skb(skb);
3827
3828         return ret;
3829 }
3830
3831 static void tg3_set_txd(struct tg3 *tp, int entry,
3832                         dma_addr_t mapping, int len, u32 flags,
3833                         u32 mss_and_is_end)
3834 {
3835         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3836         int is_end = (mss_and_is_end & 0x1);
3837         u32 mss = (mss_and_is_end >> 1);
3838         u32 vlan_tag = 0;
3839
3840         if (is_end)
3841                 flags |= TXD_FLAG_END;
3842         if (flags & TXD_FLAG_VLAN) {
3843                 vlan_tag = flags >> 16;
3844                 flags &= 0xffff;
3845         }
3846         vlan_tag |= (mss << TXD_MSS_SHIFT);
3847
3848         txd->addr_hi = ((u64) mapping >> 32);
3849         txd->addr_lo = ((u64) mapping & 0xffffffff);
3850         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3851         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3852 }
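
     /* Illustration only: mss_and_is_end packs the MSS and the "last
      * descriptor" flag into one word, mirroring how the callers build it,
      * e.g. mss = 1460 on the final fragment gives (1460 << 1) | 1 = 2921.
      * tg3_set_txd() splits it apart again, sets TXD_FLAG_END, and folds
      * the MSS into the vlan_tag word via TXD_MSS_SHIFT alongside any
      * 802.1Q tag carried in the upper 16 bits of flags.
      */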
3853
3854 /* hard_start_xmit for devices that don't have any bugs and
3855  * support TG3_FLG2_HW_TSO_2 only.
3856  */
3857 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3858 {
3859         struct tg3 *tp = netdev_priv(dev);
3860         dma_addr_t mapping;
3861         u32 len, entry, base_flags, mss;
3862
3863         len = skb_headlen(skb);
3864
3865         /* We are running in BH-disabled context with netif_tx_lock,
3866          * and TX reclaim runs via tp->poll inside of a software
3867          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3868          * no IRQ context deadlocks to worry about either.  Rejoice!
3869          */
3870         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3871                 if (!netif_queue_stopped(dev)) {
3872                         netif_stop_queue(dev);
3873
3874                         /* This is a hard error, log it. */
3875                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3876                                "queue awake!\n", dev->name);
3877                 }
3878                 return NETDEV_TX_BUSY;
3879         }
3880
3881         entry = tp->tx_prod;
3882         base_flags = 0;
3883         mss = 0;
3884         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3885             (mss = skb_shinfo(skb)->gso_size) != 0) {
3886                 int tcp_opt_len, ip_tcp_len;
3887
3888                 if (skb_header_cloned(skb) &&
3889                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3890                         dev_kfree_skb(skb);
3891                         goto out_unlock;
3892                 }
3893
3894                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3895                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3896                 else {
3897                         tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3898                         ip_tcp_len = (skb->nh.iph->ihl * 4) +
3899                                      sizeof(struct tcphdr);
3900
3901                         skb->nh.iph->check = 0;
3902                         skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3903                                                      tcp_opt_len);
3904                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3905                 }
3906
3907                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3908                                TXD_FLAG_CPU_POST_DMA);
3909
3910                 skb->h.th->check = 0;
3911
3912         }
3913         else if (skb->ip_summed == CHECKSUM_PARTIAL)
3914                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3915 #if TG3_VLAN_TAG_USED
3916         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3917                 base_flags |= (TXD_FLAG_VLAN |
3918                                (vlan_tx_tag_get(skb) << 16));
3919 #endif
3920
3921         /* Queue skb data, a.k.a. the main skb fragment. */
3922         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3923
3924         tp->tx_buffers[entry].skb = skb;
3925         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3926
3927         tg3_set_txd(tp, entry, mapping, len, base_flags,
3928                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3929
3930         entry = NEXT_TX(entry);
3931
3932         /* Now loop through additional data fragments, and queue them. */
3933         if (skb_shinfo(skb)->nr_frags > 0) {
3934                 unsigned int i, last;
3935
3936                 last = skb_shinfo(skb)->nr_frags - 1;
3937                 for (i = 0; i <= last; i++) {
3938                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3939
3940                         len = frag->size;
3941                         mapping = pci_map_page(tp->pdev,
3942                                                frag->page,
3943                                                frag->page_offset,
3944                                                len, PCI_DMA_TODEVICE);
3945
3946                         tp->tx_buffers[entry].skb = NULL;
3947                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3948
3949                         tg3_set_txd(tp, entry, mapping, len,
3950                                     base_flags, (i == last) | (mss << 1));
3951
3952                         entry = NEXT_TX(entry);
3953                 }
3954         }
3955
3956         /* Packets are ready, update Tx producer idx local and on card. */
3957         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3958
3959         tp->tx_prod = entry;
3960         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
3961                 netif_stop_queue(dev);
3962                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
3963                         netif_wake_queue(tp->dev);
3964         }
3965
3966 out_unlock:
3967         mmiowb();
3968
3969         dev->trans_start = jiffies;
3970
3971         return NETDEV_TX_OK;
3972 }
3973
3974 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3975
3976 /* Use GSO to work around a rare TSO bug that may be triggered when the
3977  * TSO header is greater than 80 bytes.
3978  */
3979 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3980 {
3981         struct sk_buff *segs, *nskb;
3982
3983         /* Estimate the number of fragments in the worst case */
3984         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3985                 netif_stop_queue(tp->dev);
3986                 return NETDEV_TX_BUSY;
3987         }
3988
3989         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3990         if (unlikely(IS_ERR(segs)))
3991                 goto tg3_tso_bug_end;
3992
3993         do {
3994                 nskb = segs;
3995                 segs = segs->next;
3996                 nskb->next = NULL;
3997                 tg3_start_xmit_dma_bug(nskb, tp->dev);
3998         } while (segs);
3999
4000 tg3_tso_bug_end:
4001         dev_kfree_skb(skb);
4002
4003         return NETDEV_TX_OK;
4004 }
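
     /* Note (an interpretation, not documented in the driver): the
      * gso_segs * 3 check above looks like a conservative worst-case
      * estimate, assuming each segment produced by skb_gso_segment() may
      * need a few descriptors (linear header plus a fragment or two) when
      * re-submitted through tg3_start_xmit_dma_bug().
      */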
4005
4006 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4007  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4008  */
4009 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4010 {
4011         struct tg3 *tp = netdev_priv(dev);
4012         dma_addr_t mapping;
4013         u32 len, entry, base_flags, mss;
4014         int would_hit_hwbug;
4015
4016         len = skb_headlen(skb);
4017
4018         /* We are running in BH-disabled context with netif_tx_lock,
4019          * and TX reclaim runs via tp->poll inside of a software
4020          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4021          * no IRQ context deadlocks to worry about either.  Rejoice!
4022          */
4023         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4024                 if (!netif_queue_stopped(dev)) {
4025                         netif_stop_queue(dev);
4026
4027                         /* This is a hard error, log it. */
4028                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4029                                "queue awake!\n", dev->name);
4030                 }
4031                 return NETDEV_TX_BUSY;
4032         }
4033
4034         entry = tp->tx_prod;
4035         base_flags = 0;
4036         if (skb->ip_summed == CHECKSUM_PARTIAL)
4037                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4038         mss = 0;
4039         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
4040             (mss = skb_shinfo(skb)->gso_size) != 0) {
4041                 int tcp_opt_len, ip_tcp_len, hdr_len;
4042
4043                 if (skb_header_cloned(skb) &&
4044                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4045                         dev_kfree_skb(skb);
4046                         goto out_unlock;
4047                 }
4048
4049                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4050                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
4051
4052                 hdr_len = ip_tcp_len + tcp_opt_len;
4053                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4054                              (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
4055                         return (tg3_tso_bug(tp, skb));
4056
4057                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4058                                TXD_FLAG_CPU_POST_DMA);
4059
4060                 skb->nh.iph->check = 0;
4061                 skb->nh.iph->tot_len = htons(mss + hdr_len);
4062                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4063                         skb->h.th->check = 0;
4064                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4065                 }
4066                 else {
4067                         skb->h.th->check =
4068                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4069                                                    skb->nh.iph->daddr,
4070                                                    0, IPPROTO_TCP, 0);
4071                 }
4072
4073                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4074                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4075                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4076                                 int tsflags;
4077
4078                                 tsflags = ((skb->nh.iph->ihl - 5) +
4079                                            (tcp_opt_len >> 2));
4080                                 mss |= (tsflags << 11);
4081                         }
4082                 } else {
4083                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4084                                 int tsflags;
4085
4086                                 tsflags = ((skb->nh.iph->ihl - 5) +
4087                                            (tcp_opt_len >> 2));
4088                                 base_flags |= tsflags << 12;
4089                         }
4090                 }
4091         }
4092 #if TG3_VLAN_TAG_USED
4093         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4094                 base_flags |= (TXD_FLAG_VLAN |
4095                                (vlan_tx_tag_get(skb) << 16));
4096 #endif
4097
4098         /* Queue skb data, a.k.a. the main skb fragment. */
4099         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4100
4101         tp->tx_buffers[entry].skb = skb;
4102         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4103
4104         would_hit_hwbug = 0;
4105
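        /* Some chip revisions cannot DMA a buffer that crosses a 4GB
         * boundary, and 40-bit parts cannot address beyond 2^40.  A
         * mapping that would trip either limit is only flagged here; the
         * whole packet is then rerouted through
         * tigon3_dma_hwbug_workaround() further down.
         */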
4106         if (tg3_4g_overflow_test(mapping, len))
4107                 would_hit_hwbug = 1;
4108
4109         tg3_set_txd(tp, entry, mapping, len, base_flags,
4110                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4111
4112         entry = NEXT_TX(entry);
4113
4114         /* Now loop through additional data fragments, and queue them. */
4115         if (skb_shinfo(skb)->nr_frags > 0) {
4116                 unsigned int i, last;
4117
4118                 last = skb_shinfo(skb)->nr_frags - 1;
4119                 for (i = 0; i <= last; i++) {
4120                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4121
4122                         len = frag->size;
4123                         mapping = pci_map_page(tp->pdev,
4124                                                frag->page,
4125                                                frag->page_offset,
4126                                                len, PCI_DMA_TODEVICE);
4127
4128                         tp->tx_buffers[entry].skb = NULL;
4129                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4130
4131                         if (tg3_4g_overflow_test(mapping, len))
4132                                 would_hit_hwbug = 1;
4133
4134                         if (tg3_40bit_overflow_test(tp, mapping, len))
4135                                 would_hit_hwbug = 1;
4136
4137                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4138                                 tg3_set_txd(tp, entry, mapping, len,
4139                                             base_flags, (i == last)|(mss << 1));
4140                         else
4141                                 tg3_set_txd(tp, entry, mapping, len,
4142                                             base_flags, (i == last));
4143
4144                         entry = NEXT_TX(entry);
4145                 }
4146         }
4147
4148         if (would_hit_hwbug) {
4149                 u32 last_plus_one = entry;
4150                 u32 start;
4151
4152                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4153                 start &= (TG3_TX_RING_SIZE - 1);
4154
4155                 /* If the workaround fails due to memory/mapping
4156                  * failure, silently drop this packet.
4157                  */
4158                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4159                                                 &start, base_flags, mss))
4160                         goto out_unlock;
4161
4162                 entry = start;
4163         }
4164
4165         /* Packets are ready, update Tx producer idx locally and on the card. */
4166         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4167
4168         tp->tx_prod = entry;
4169         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4170                 netif_stop_queue(dev);
4171                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4172                         netif_wake_queue(tp->dev);
4173         }
4174
4175 out_unlock:
4176         mmiowb();
4177
4178         dev->trans_start = jiffies;
4179
4180         return NETDEV_TX_OK;
4181 }
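
/* Illustrative sketch (kept out of the build with #if 0, following the
 * convention used for tg3FwData later in this file): the free-slot
 * arithmetic that the queue stop/wake checks in the xmit path above rely
 * on.  It assumes tg3_tx_avail() elsewhere in this file reduces to the
 * pending budget minus the masked producer/consumer delta.
 */
#if 0
static inline u32 tg3_tx_slots_free_sketch(u32 tx_pending, u32 prod, u32 cons)
{
        /* The mask keeps the delta correct even after the producer and
         * consumer indices wrap at the end of the ring.
         */
        return tx_pending - ((prod - cons) & (TG3_TX_RING_SIZE - 1));
}
#endif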
4182
4183 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4184                                int new_mtu)
4185 {
4186         dev->mtu = new_mtu;
4187
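        /* 5780-class devices cannot do TSO and jumbo frames at the same
         * time, so TSO capability is traded away while a jumbo MTU is in
         * use and restored once the MTU drops back to standard size.
         */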
4188         if (new_mtu > ETH_DATA_LEN) {
4189                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4190                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4191                         ethtool_op_set_tso(dev, 0);
4192                 }
4193                 else
4194                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4195         } else {
4196                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4197                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4198                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4199         }
4200 }
4201
4202 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4203 {
4204         struct tg3 *tp = netdev_priv(dev);
4205         int err;
4206
4207         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4208                 return -EINVAL;
4209
4210         if (!netif_running(dev)) {
4211                 /* We'll just catch it later when the
4212                  * device is up'd.
4213                  */
4214                 tg3_set_mtu(dev, tp, new_mtu);
4215                 return 0;
4216         }
4217
4218         tg3_netif_stop(tp);
4219
4220         tg3_full_lock(tp, 1);
4221
4222         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4223
4224         tg3_set_mtu(dev, tp, new_mtu);
4225
4226         err = tg3_restart_hw(tp, 0);
4227
4228         if (!err)
4229                 tg3_netif_start(tp);
4230
4231         tg3_full_unlock(tp);
4232
4233         return err;
4234 }
4235
4236 /* Free up pending packets in all rx/tx rings.
4237  *
4238  * The chip has been shut down and the driver detached from
4239  * the networking, so no interrupts or new tx packets will
4240  * the networking core, so no interrupts or new tx packets will
4241  * in an interrupt context and thus may sleep.
4242  */
4243 static void tg3_free_rings(struct tg3 *tp)
4244 {
4245         struct ring_info *rxp;
4246         int i;
4247
4248         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4249                 rxp = &tp->rx_std_buffers[i];
4250
4251                 if (rxp->skb == NULL)
4252                         continue;
4253                 pci_unmap_single(tp->pdev,
4254                                  pci_unmap_addr(rxp, mapping),
4255                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4256                                  PCI_DMA_FROMDEVICE);
4257                 dev_kfree_skb_any(rxp->skb);
4258                 rxp->skb = NULL;
4259         }
4260
4261         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4262                 rxp = &tp->rx_jumbo_buffers[i];
4263
4264                 if (rxp->skb == NULL)
4265                         continue;
4266                 pci_unmap_single(tp->pdev,
4267                                  pci_unmap_addr(rxp, mapping),
4268                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4269                                  PCI_DMA_FROMDEVICE);
4270                 dev_kfree_skb_any(rxp->skb);
4271                 rxp->skb = NULL;
4272         }
4273
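        /* In the TX ring only the first descriptor of a packet carries
         * the skb pointer; it is followed by one entry per page fragment,
         * so the walk below unmaps the head and then each fragment before
         * freeing the skb.
         */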
4274         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4275                 struct tx_ring_info *txp;
4276                 struct sk_buff *skb;
4277                 int j;
4278
4279                 txp = &tp->tx_buffers[i];
4280                 skb = txp->skb;
4281
4282                 if (skb == NULL) {
4283                         i++;
4284                         continue;
4285                 }
4286
4287                 pci_unmap_single(tp->pdev,
4288                                  pci_unmap_addr(txp, mapping),
4289                                  skb_headlen(skb),
4290                                  PCI_DMA_TODEVICE);
4291                 txp->skb = NULL;
4292
4293                 i++;
4294
4295                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4296                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4297                         pci_unmap_page(tp->pdev,
4298                                        pci_unmap_addr(txp, mapping),
4299                                        skb_shinfo(skb)->frags[j].size,
4300                                        PCI_DMA_TODEVICE);
4301                         i++;
4302                 }
4303
4304                 dev_kfree_skb_any(skb);
4305         }
4306 }
4307
4308 /* Initialize tx/rx rings for packet processing.
4309  *
4310  * The chip has been shut down and the driver detached from
4311  * the networking core, so no interrupts or new tx packets will
4312  * end up in the driver.  tp->{tx,}lock are held and thus
4313  * we may not sleep.
4314  */
4315 static int tg3_init_rings(struct tg3 *tp)
4316 {
4317         u32 i;
4318
4319         /* Free up all the SKBs. */
4320         tg3_free_rings(tp);
4321
4322         /* Zero out all descriptors. */
4323         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4324         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4325         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4326         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4327
4328         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4329         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4330             (tp->dev->mtu > ETH_DATA_LEN))
4331                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4332
4333         /* Initialize invariants of the rings; we only set this
4334          * stuff once.  This works because the card does not
4335          * write into the rx buffer posting rings.
4336          */
4337         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4338                 struct tg3_rx_buffer_desc *rxd;
4339
4340                 rxd = &tp->rx_std[i];
4341                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4342                         << RXD_LEN_SHIFT;
4343                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4344                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4345                                (i << RXD_OPAQUE_INDEX_SHIFT));
4346         }
4347
4348         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4349                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4350                         struct tg3_rx_buffer_desc *rxd;
4351
4352                         rxd = &tp->rx_jumbo[i];
4353                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4354                                 << RXD_LEN_SHIFT;
4355                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4356                                 RXD_FLAG_JUMBO;
4357                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4358                                (i << RXD_OPAQUE_INDEX_SHIFT));
4359                 }
4360         }
4361
4362         /* Now allocate fresh SKBs for each rx ring. */
4363         for (i = 0; i < tp->rx_pending; i++) {
4364                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4365                         printk(KERN_WARNING PFX
4366                                "%s: Using a smaller RX standard ring, "
4367                                "only %d out of %d buffers were allocated "
4368                                "successfully.\n",
4369                                tp->dev->name, i, tp->rx_pending);
4370                         if (i == 0)
4371                                 return -ENOMEM;
4372                         tp->rx_pending = i;
4373                         break;
4374                 }
4375         }
4376
4377         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4378                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4379                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4380                                              -1, i) < 0) {
4381                                 printk(KERN_WARNING PFX
4382                                        "%s: Using a smaller RX jumbo ring, "
4383                                        "only %d out of %d buffers were "
4384                                        "allocated successfully.\n",
4385                                        tp->dev->name, i, tp->rx_jumbo_pending);
4386                                 if (i == 0) {
4387                                         tg3_free_rings(tp);
4388                                         return -ENOMEM;
4389                                 }
4390                                 tp->rx_jumbo_pending = i;
4391                                 break;
4392                         }
4393                 }
4394         }
4395         return 0;
4396 }
4397
4398 /*
4399  * Must not be invoked with interrupt sources disabled and
4400  * the hardware shut down.
4401  */
4402 static void tg3_free_consistent(struct tg3 *tp)
4403 {
4404         kfree(tp->rx_std_buffers);
4405         tp->rx_std_buffers = NULL;
4406         if (tp->rx_std) {
4407                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4408                                     tp->rx_std, tp->rx_std_mapping);
4409                 tp->rx_std = NULL;
4410         }
4411         if (tp->rx_jumbo) {
4412                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4413                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4414                 tp->rx_jumbo = NULL;
4415         }
4416         if (tp->rx_rcb) {
4417                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4418                                     tp->rx_rcb, tp->rx_rcb_mapping);
4419                 tp->rx_rcb = NULL;
4420         }
4421         if (tp->tx_ring) {
4422                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4423                         tp->tx_ring, tp->tx_desc_mapping);
4424                 tp->tx_ring = NULL;
4425         }
4426         if (tp->hw_status) {
4427                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4428                                     tp->hw_status, tp->status_mapping);
4429                 tp->hw_status = NULL;
4430         }
4431         if (tp->hw_stats) {
4432                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4433                                     tp->hw_stats, tp->stats_mapping);
4434                 tp->hw_stats = NULL;
4435         }
4436 }
4437
4438 /*
4439  * Must not be invoked with interrupt sources disabled and
4440  * the hardware shut down.  Can sleep.
4441  */
4442 static int tg3_alloc_consistent(struct tg3 *tp)
4443 {
4444         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4445                                       (TG3_RX_RING_SIZE +
4446                                        TG3_RX_JUMBO_RING_SIZE)) +
4447                                      (sizeof(struct tx_ring_info) *
4448                                       TG3_TX_RING_SIZE),
4449                                      GFP_KERNEL);
4450         if (!tp->rx_std_buffers)
4451                 return -ENOMEM;
4452
4453         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4454         tp->tx_buffers = (struct tx_ring_info *)
4455                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
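        /* All three shadow arrays live inside the single kzalloc() block
         * above: the standard ring_info entries come first, then the
         * jumbo ring_info entries, then the tx_ring_info entries.
         */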
4456
4457         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4458                                           &tp->rx_std_mapping);
4459         if (!tp->rx_std)
4460                 goto err_out;
4461
4462         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4463                                             &tp->rx_jumbo_mapping);
4464
4465         if (!tp->rx_jumbo)
4466                 goto err_out;
4467
4468         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4469                                           &tp->rx_rcb_mapping);
4470         if (!tp->rx_rcb)
4471                 goto err_out;
4472
4473         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4474                                            &tp->tx_desc_mapping);
4475         if (!tp->tx_ring)
4476                 goto err_out;
4477
4478         tp->hw_status = pci_alloc_consistent(tp->pdev,
4479                                              TG3_HW_STATUS_SIZE,
4480                                              &tp->status_mapping);
4481         if (!tp->hw_status)
4482                 goto err_out;
4483
4484         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4485                                             sizeof(struct tg3_hw_stats),
4486                                             &tp->stats_mapping);
4487         if (!tp->hw_stats)
4488                 goto err_out;
4489
4490         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4491         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4492
4493         return 0;
4494
4495 err_out:
4496         tg3_free_consistent(tp);
4497         return -ENOMEM;
4498 }
4499
4500 #define MAX_WAIT_CNT 1000
4501
4502 /* To stop a block, clear the enable bit and poll till it
4503  * clears.  tp->lock is held.
4504  */
4505 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4506 {
4507         unsigned int i;
4508         u32 val;
4509
4510         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4511                 switch (ofs) {
4512                 case RCVLSC_MODE:
4513                 case DMAC_MODE:
4514                 case MBFREE_MODE:
4515                 case BUFMGR_MODE:
4516                 case MEMARB_MODE:
4517                         /* We can't enable/disable these bits of the
4518                          * 5705/5750, so just say success.
4519                          */
4520                         return 0;
4521
4522                 default:
4523                         break;
4524                 }
4525         }
4526
4527         val = tr32(ofs);
4528         val &= ~enable_bit;
4529         tw32_f(ofs, val);
4530
4531         for (i = 0; i < MAX_WAIT_CNT; i++) {
4532                 udelay(100);
4533                 val = tr32(ofs);
4534                 if ((val & enable_bit) == 0)
4535                         break;
4536         }
4537
4538         if (i == MAX_WAIT_CNT && !silent) {
4539                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4540                        "ofs=%lx enable_bit=%x\n",
4541                        ofs, enable_bit);
4542                 return -ENODEV;
4543         }
4544
4545         return 0;
4546 }
4547
4548 /* tp->lock is held. */
4549 static int tg3_abort_hw(struct tg3 *tp, int silent)
4550 {
4551         int i, err;
4552
4553         tg3_disable_ints(tp);
4554
4555         tp->rx_mode &= ~RX_MODE_ENABLE;
4556         tw32_f(MAC_RX_MODE, tp->rx_mode);
4557         udelay(10);
4558
4559         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4560         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4561         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4562         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4563         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4564         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4565
4566         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4567         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4568         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4569         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4570         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4571         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4572         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4573
4574         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4575         tw32_f(MAC_MODE, tp->mac_mode);
4576         udelay(40);
4577
4578         tp->tx_mode &= ~TX_MODE_ENABLE;
4579         tw32_f(MAC_TX_MODE, tp->tx_mode);
4580
4581         for (i = 0; i < MAX_WAIT_CNT; i++) {
4582                 udelay(100);
4583                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4584                         break;
4585         }
4586         if (i >= MAX_WAIT_CNT) {
4587                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4588                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4589                        tp->dev->name, tr32(MAC_TX_MODE));
4590                 err |= -ENODEV;
4591         }
4592
4593         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4594         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4595         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4596
4597         tw32(FTQ_RESET, 0xffffffff);
4598         tw32(FTQ_RESET, 0x00000000);
4599
4600         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4601         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4602
4603         if (tp->hw_status)
4604                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4605         if (tp->hw_stats)
4606                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4607
4608         return err;
4609 }
4610
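/* NVRAM is guarded by a hardware software-arbitration semaphore shared
 * with the bootcode: ownership is requested with SWARB_REQ_SET1 and is
 * held once SWARB_GNT1 is observed.  nvram_lock_cnt makes the lock
 * behave recursively so a nested caller does not drop it prematurely.
 */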
4611 /* tp->lock is held. */
4612 static int tg3_nvram_lock(struct tg3 *tp)
4613 {
4614         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4615                 int i;
4616
4617                 if (tp->nvram_lock_cnt == 0) {
4618                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4619                         for (i = 0; i < 8000; i++) {
4620                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4621                                         break;
4622                                 udelay(20);
4623                         }
4624                         if (i == 8000) {
4625                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4626                                 return -ENODEV;
4627                         }
4628                 }
4629                 tp->nvram_lock_cnt++;
4630         }
4631         return 0;
4632 }
4633
4634 /* tp->lock is held. */
4635 static void tg3_nvram_unlock(struct tg3 *tp)
4636 {
4637         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4638                 if (tp->nvram_lock_cnt > 0)
4639                         tp->nvram_lock_cnt--;
4640                 if (tp->nvram_lock_cnt == 0)
4641                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4642         }
4643 }
4644
4645 /* tp->lock is held. */
4646 static void tg3_enable_nvram_access(struct tg3 *tp)
4647 {
4648         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4649             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4650                 u32 nvaccess = tr32(NVRAM_ACCESS);
4651
4652                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4653         }
4654 }
4655
4656 /* tp->lock is held. */
4657 static void tg3_disable_nvram_access(struct tg3 *tp)
4658 {
4659         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4660             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4661                 u32 nvaccess = tr32(NVRAM_ACCESS);
4662
4663                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4664         }
4665 }
4666
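/* The three signature writers below implement the driver/ASF firmware
 * handshake around resets: a magic word in the firmware mailbox plus a
 * DRV_STATE_* value in NIC SRAM tell the management firmware whether
 * the driver is starting, suspending or unloading.
 */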
4667 /* tp->lock is held. */
4668 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4669 {
4670         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4671                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4672
4673         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4674                 switch (kind) {
4675                 case RESET_KIND_INIT:
4676                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4677                                       DRV_STATE_START);
4678                         break;
4679
4680                 case RESET_KIND_SHUTDOWN:
4681                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4682                                       DRV_STATE_UNLOAD);
4683                         break;
4684
4685                 case RESET_KIND_SUSPEND:
4686                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4687                                       DRV_STATE_SUSPEND);
4688                         break;
4689
4690                 default:
4691                         break;
4692                 }
4693         }
4694 }
4695
4696 /* tp->lock is held. */
4697 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4698 {
4699         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4700                 switch (kind) {
4701                 case RESET_KIND_INIT:
4702                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4703                                       DRV_STATE_START_DONE);
4704                         break;
4705
4706                 case RESET_KIND_SHUTDOWN:
4707                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4708                                       DRV_STATE_UNLOAD_DONE);
4709                         break;
4710
4711                 default:
4712                         break;
4713                 }
4714         }
4715 }
4716
4717 /* tp->lock is held. */
4718 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4719 {
4720         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4721                 switch (kind) {
4722                 case RESET_KIND_INIT:
4723                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4724                                       DRV_STATE_START);
4725                         break;
4726
4727                 case RESET_KIND_SHUTDOWN:
4728                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4729                                       DRV_STATE_UNLOAD);
4730                         break;
4731
4732                 case RESET_KIND_SUSPEND:
4733                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4734                                       DRV_STATE_SUSPEND);
4735                         break;
4736
4737                 default:
4738                         break;
4739                 }
4740         }
4741 }
4742
4743 static int tg3_poll_fw(struct tg3 *tp)
4744 {
4745         int i;
4746         u32 val;
4747
4748         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4749                 /* Wait up to 20ms for init done. */
4750                 for (i = 0; i < 200; i++) {
4751                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4752                                 return 0;
4753                         udelay(100);
4754                 }
4755                 return -ENODEV;
4756         }
4757
4758         /* Wait for firmware initialization to complete. */
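        /* (The bootcode acknowledges by writing back the one's complement
         * of the magic value that was posted to this mailbox before the
         * reset, e.g. by tg3_write_sig_pre_reset().)
         */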
4759         for (i = 0; i < 100000; i++) {
4760                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4761                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4762                         break;
4763                 udelay(10);
4764         }
4765
4766         /* Chip might not be fitted with firmware.  Some Sun onboard
4767          * parts are configured like that.  So don't signal the timeout
4768          * of the above loop as an error, but do report the lack of
4769          * running firmware once.
4770          */
4771         if (i >= 100000 &&
4772             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4773                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4774
4775                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4776                        tp->dev->name);
4777         }
4778
4779         return 0;
4780 }
4781
4782 static void tg3_stop_fw(struct tg3 *);
4783
4784 /* tp->lock is held. */
4785 static int tg3_chip_reset(struct tg3 *tp)
4786 {
4787         u32 val;
4788         void (*write_op)(struct tg3 *, u32, u32);
4789         int err;
4790
4791         tg3_nvram_lock(tp);
4792
4793         /* No matching tg3_nvram_unlock() after this because
4794          * the chip reset below will undo the nvram lock.
4795          */
4796         tp->nvram_lock_cnt = 0;
4797
4798         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4799             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4800             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4801                 tw32(GRC_FASTBOOT_PC, 0);
4802
4803         /*
4804          * We must avoid the readl() that normally takes place.
4805          * It locks up machines, causes machine checks, and does
4806          * other fun things.  So, temporarily disable the 5701
4807          * hardware workaround while we do the reset.
4808          */
4809         write_op = tp->write32;
4810         if (write_op == tg3_write_flush_reg32)
4811                 tp->write32 = tg3_write32;
4812
4813         /* do the reset */
4814         val = GRC_MISC_CFG_CORECLK_RESET;
4815
4816         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4817                 if (tr32(0x7e2c) == 0x60) {
4818                         tw32(0x7e2c, 0x20);
4819                 }
4820                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4821                         tw32(GRC_MISC_CFG, (1 << 29));
4822                         val |= (1 << 29);
4823                 }
4824         }
4825
4826         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4827                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
4828                 tw32(GRC_VCPU_EXT_CTRL,
4829                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
4830         }
4831
4832         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4833                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4834         tw32(GRC_MISC_CFG, val);
4835
4836         /* restore 5701 hardware bug workaround write method */
4837         tp->write32 = write_op;
4838
4839         /* Unfortunately, we have to delay before the PCI read back.
4840          * Some 575X chips will not even respond to a PCI cfg access
4841          * when the reset command is given to the chip.
4842          *
4843          * How do these hardware designers expect things to work
4844          * properly if the PCI write is posted for a long period
4845          * of time?  It is always necessary to have some method by
4846          * which a register read back can occur to flush out the
4847          * posted write that performs the reset.
4848          *
4849          * For most tg3 variants the trick below has worked.
4850          * Ho hum...
4851          */
4852         udelay(120);
4853
4854         /* Flush PCI posted writes.  The normal MMIO registers
4855          * are inaccessible at this time so this is the only
4856          * way to do this reliably (actually, this is no longer
4857          * the case, see above).  I tried to use indirect
4858          * register read/write but this upset some 5701 variants.
4859          */
4860         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4861
4862         udelay(120);
4863
4864         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4865                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4866                         int i;
4867                         u32 cfg_val;
4868
4869                         /* Wait for link training to complete.  */
4870                         for (i = 0; i < 5000; i++)
4871                                 udelay(100);
4872
4873                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4874                         pci_write_config_dword(tp->pdev, 0xc4,
4875                                                cfg_val | (1 << 15));
4876                 }
4877                 /* Set PCIE max payload size and clear error status.  */
4878                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4879         }
4880
4881         /* Re-enable indirect register accesses. */
4882         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4883                                tp->misc_host_ctrl);
4884
4885         /* Set MAX PCI retry to zero. */
4886         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4887         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4888             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4889                 val |= PCISTATE_RETRY_SAME_DMA;
4890         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4891
4892         pci_restore_state(tp->pdev);
4893
4894         /* Make sure PCI-X relaxed ordering bit is clear. */
4895         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4896         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4897         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4898
4899         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4900                 u32 val;
4901
4902                 /* Chip reset on 5780 will reset MSI enable bit,
4903                  * so need to restore it.
4904                  */
4905                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4906                         u16 ctrl;
4907
4908                         pci_read_config_word(tp->pdev,
4909                                              tp->msi_cap + PCI_MSI_FLAGS,
4910                                              &ctrl);
4911                         pci_write_config_word(tp->pdev,
4912                                               tp->msi_cap + PCI_MSI_FLAGS,
4913                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4914                         val = tr32(MSGINT_MODE);
4915                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4916                 }
4917
4918                 val = tr32(MEMARB_MODE);
4919                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4920
4921         } else
4922                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4923
4924         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4925                 tg3_stop_fw(tp);
4926                 tw32(0x5000, 0x400);
4927         }
4928
4929         tw32(GRC_MODE, tp->grc_mode);
4930
4931         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4932                 u32 val = tr32(0xc4);
4933
4934                 tw32(0xc4, val | (1 << 15));
4935         }
4936
4937         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4938             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4939                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4940                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4941                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4942                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4943         }
4944
4945         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4946                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4947                 tw32_f(MAC_MODE, tp->mac_mode);
4948         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4949                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4950                 tw32_f(MAC_MODE, tp->mac_mode);
4951         } else
4952                 tw32_f(MAC_MODE, 0);
4953         udelay(40);
4954
4955         err = tg3_poll_fw(tp);
4956         if (err)
4957                 return err;
4958
4959         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4960             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4961                 u32 val = tr32(0x7c00);
4962
4963                 tw32(0x7c00, val | (1 << 25));
4964         }
4965
4966         /* Reprobe ASF enable state.  */
4967         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4968         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4969         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4970         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4971                 u32 nic_cfg;
4972
4973                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4974                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4975                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4976                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4977                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4978                 }
4979         }
4980
4981         return 0;
4982 }
4983
4984 /* tp->lock is held. */
4985 static void tg3_stop_fw(struct tg3 *tp)
4986 {
4987         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4988                 u32 val;
4989                 int i;
4990
4991                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4992                 val = tr32(GRC_RX_CPU_EVENT);
4993                 val |= (1 << 14);
4994                 tw32(GRC_RX_CPU_EVENT, val);
4995
4996                 /* Wait for RX cpu to ACK the event.  */
4997                 for (i = 0; i < 100; i++) {
4998                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4999                                 break;
5000                         udelay(1);
5001                 }
5002         }
5003 }
5004
5005 /* tp->lock is held. */
5006 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5007 {
5008         int err;
5009
5010         tg3_stop_fw(tp);
5011
5012         tg3_write_sig_pre_reset(tp, kind);
5013
5014         tg3_abort_hw(tp, silent);
5015         err = tg3_chip_reset(tp);
5016
5017         tg3_write_sig_legacy(tp, kind);
5018         tg3_write_sig_post_reset(tp, kind);
5019
5020         if (err)
5021                 return err;
5022
5023         return 0;
5024 }
5025
5026 #define TG3_FW_RELEASE_MAJOR    0x0
5027 #define TG3_FW_RELEASE_MINOR    0x0
5028 #define TG3_FW_RELEASE_FIX      0x0
5029 #define TG3_FW_START_ADDR       0x08000000
5030 #define TG3_FW_TEXT_ADDR        0x08000000
5031 #define TG3_FW_TEXT_LEN         0x9c0
5032 #define TG3_FW_RODATA_ADDR      0x080009c0
5033 #define TG3_FW_RODATA_LEN       0x60
5034 #define TG3_FW_DATA_ADDR        0x08000a40
5035 #define TG3_FW_DATA_LEN         0x20
5036 #define TG3_FW_SBSS_ADDR        0x08000a60
5037 #define TG3_FW_SBSS_LEN         0xc
5038 #define TG3_FW_BSS_ADDR         0x08000a70
5039 #define TG3_FW_BSS_LEN          0x10
5040
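/* The arrays below hold the actual firmware image, one section per array:
 * tg3FwText is the .text payload and tg3FwRodata the .rodata words; the
 * .data section is all zeros and is therefore not stored (see the #if 0
 * block further down).
 */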
5041 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5042         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5043         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5044         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5045         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5046         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5047         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5048         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5049         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5050         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5051         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5052         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5053         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5054         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5055         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5056         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5057         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5058         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5059         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5060         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5061         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5062         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5063         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5064         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5065         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5066         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5067         0, 0, 0, 0, 0, 0,
5068         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5069         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5070         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5071         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5072         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5073         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5074         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5075         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5076         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5077         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5078         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5079         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5080         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5081         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5082         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5083         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5084         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5085         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5086         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5087         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5088         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5089         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5090         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5091         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5092         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5093         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5094         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5095         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5096         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5097         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5098         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5099         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5100         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5101         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5102         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5103         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5104         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5105         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5106         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5107         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5108         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5109         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5110         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5111         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5112         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5113         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5114         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5115         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5116         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5117         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5118         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5119         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5120         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5121         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5122         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5123         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5124         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5125         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5126         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5127         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5128         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5129         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5130         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5131         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5132         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5133 };
5134
5135 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5136         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5137         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5138         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5139         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5140         0x00000000
5141 };
5142
5143 #if 0 /* All zeros, don't eat up space with it. */
5144 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5145         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5146         0x00000000, 0x00000000, 0x00000000, 0x00000000
5147 };
5148 #endif
5149
5150 #define RX_CPU_SCRATCH_BASE     0x30000
5151 #define RX_CPU_SCRATCH_SIZE     0x04000
5152 #define TX_CPU_SCRATCH_BASE     0x34000
5153 #define TX_CPU_SCRATCH_SIZE     0x04000
5154
5155 /* tp->lock is held. */
5156 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5157 {
5158         int i;
5159
5160         BUG_ON(offset == TX_CPU_BASE &&
5161             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5162
5163         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5164                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5165
5166                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5167                 return 0;
5168         }
5169         if (offset == RX_CPU_BASE) {
5170                 for (i = 0; i < 10000; i++) {
5171                         tw32(offset + CPU_STATE, 0xffffffff);
5172                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5173                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5174                                 break;
5175                 }
5176
5177                 tw32(offset + CPU_STATE, 0xffffffff);
5178                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5179                 udelay(10);
5180         } else {
5181                 for (i = 0; i < 10000; i++) {
5182                         tw32(offset + CPU_STATE, 0xffffffff);
5183                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5184                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5185                                 break;
5186                 }
5187         }
5188
5189         if (i >= 10000) {
5190                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
5191                        "%s CPU\n",
5192                        tp->dev->name,
5193                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5194                 return -ENODEV;
5195         }
5196
5197         /* Clear firmware's nvram arbitration. */
5198         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5199                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5200         return 0;
5201 }
5202
5203 struct fw_info {
5204         unsigned int text_base;
5205         unsigned int text_len;
5206         const u32 *text_data;
5207         unsigned int rodata_base;
5208         unsigned int rodata_len;
5209         const u32 *rodata_data;
5210         unsigned int data_base;
5211         unsigned int data_len;
5212         const u32 *data_data;
5213 };
5214
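/* A fw_info mirrors the firmware image section by section: each entry
 * carries a section's load address, its length and a pointer to its
 * words (a NULL pointer means the section is all zeros).
 */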
5215 /* tp->lock is held. */
5216 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5217                                  int cpu_scratch_size, struct fw_info *info)
5218 {
5219         int err, lock_err, i;
5220         void (*write_op)(struct tg3 *, u32, u32);
5221
5222         if (cpu_base == TX_CPU_BASE &&
5223             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5224                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5225                        "TX cpu firmware on %s which is 5705.\n",
5226                        tp->dev->name);
5227                 return -EINVAL;
5228         }
5229
5230         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5231                 write_op = tg3_write_mem;
5232         else
5233                 write_op = tg3_write_indirect_reg32;
5234
5235         /* It is possible that bootcode is still loading at this point.
5236          * Get the nvram lock first before halting the cpu.
5237          */
5238         lock_err = tg3_nvram_lock(tp);
5239         err = tg3_halt_cpu(tp, cpu_base);
5240         if (!lock_err)
5241                 tg3_nvram_unlock(tp);
5242         if (err)
5243                 goto out;
5244
5245         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5246                 write_op(tp, cpu_scratch_base + i, 0);
5247         tw32(cpu_base + CPU_STATE, 0xffffffff);
5248         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5249         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5250                 write_op(tp, (cpu_scratch_base +
5251                               (info->text_base & 0xffff) +
5252                               (i * sizeof(u32))),
5253                          (info->text_data ?
5254                           info->text_data[i] : 0));
5255         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5256                 write_op(tp, (cpu_scratch_base +
5257                               (info->rodata_base & 0xffff) +
5258                               (i * sizeof(u32))),
5259                          (info->rodata_data ?
5260                           info->rodata_data[i] : 0));
5261         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5262                 write_op(tp, (cpu_scratch_base +
5263                               (info->data_base & 0xffff) +
5264                               (i * sizeof(u32))),
5265                          (info->data_data ?
5266                           info->data_data[i] : 0));
5267
5268         err = 0;
5269
5270 out:
5271         return err;
5272 }
5273
5274 /* tp->lock is held. */
5275 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5276 {
5277         struct fw_info info;
5278         int err, i;
5279
5280         info.text_base = TG3_FW_TEXT_ADDR;
5281         info.text_len = TG3_FW_TEXT_LEN;
5282         info.text_data = &tg3FwText[0];
5283         info.rodata_base = TG3_FW_RODATA_ADDR;
5284         info.rodata_len = TG3_FW_RODATA_LEN;
5285         info.rodata_data = &tg3FwRodata[0];
5286         info.data_base = TG3_FW_DATA_ADDR;
5287         info.data_len = TG3_FW_DATA_LEN;
5288         info.data_data = NULL;
5289
5290         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5291                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5292                                     &info);
5293         if (err)
5294                 return err;
5295
5296         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5297                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5298                                     &info);
5299         if (err)
5300                 return err;
5301
5302         /* Now startup only the RX cpu. */
5303         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5304         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5305
5306         for (i = 0; i < 5; i++) {
5307                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5308                         break;
5309                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5310                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5311                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5312                 udelay(1000);
5313         }
5314         if (i >= 5) {
5315                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5316                        "to set RX CPU PC, is %08x should be %08x\n",
5317                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5318                        TG3_FW_TEXT_ADDR);
5319                 return -ENODEV;
5320         }
5321         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5322         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5323
5324         return 0;
5325 }
5326
5327
5328 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5329 #define TG3_TSO_FW_RELEASE_MINOR        0x6
5330 #define TG3_TSO_FW_RELEASE_FIX          0x0
5331 #define TG3_TSO_FW_START_ADDR           0x08000000
5332 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5333 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5334 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5335 #define TG3_TSO_FW_RODATA_LEN           0x60
5336 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5337 #define TG3_TSO_FW_DATA_LEN             0x30
5338 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5339 #define TG3_TSO_FW_SBSS_LEN             0x2c
5340 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5341 #define TG3_TSO_FW_BSS_LEN              0x894
5342
5343 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5344         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5345         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5346         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5347         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5348         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5349         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5350         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5351         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5352         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5353         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5354         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5355         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5356         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5357         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5358         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5359         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5360         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5361         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5362         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5363         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5364         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5365         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5366         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5367         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5368         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5369         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5370         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5371         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5372         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5373         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5374         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5375         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5376         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5377         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5378         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5379         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5380         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5381         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5382         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5383         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5384         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5385         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5386         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5387         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5388         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5389         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5390         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5391         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5392         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5393         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5394         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5395         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5396         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5397         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5398         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5399         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5400         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5401         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5402         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5403         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5404         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5405         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5406         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5407         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5408         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5409         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5410         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5411         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5412         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5413         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5414         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5415         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5416         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5417         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5418         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5419         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5420         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5421         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5422         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5423         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5424         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5425         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5426         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5427         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5428         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5429         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5430         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5431         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5432         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5433         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5434         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5435         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5436         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5437         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5438         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5439         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5440         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5441         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5442         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5443         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5444         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5445         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5446         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5447         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5448         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5449         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5450         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5451         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5452         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5453         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5454         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5455         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5456         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5457         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5458         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5459         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5460         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5461         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5462         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5463         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5464         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5465         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5466         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5467         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5468         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5469         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5470         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5471         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5472         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5473         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5474         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5475         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5476         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5477         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5478         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5479         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5480         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5481         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5482         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5483         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5484         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5485         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5486         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5487         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5488         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5489         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5490         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5491         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5492         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5493         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5494         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5495         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5496         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5497         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5498         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5499         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5500         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5501         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5502         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5503         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5504         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5505         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5506         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5507         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5508         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5509         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5510         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5511         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5512         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5513         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5514         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5515         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5516         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5517         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5518         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5519         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5520         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5521         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5522         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5523         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5524         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5525         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5526         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5527         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5528         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5529         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5530         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5531         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5532         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5533         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5534         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5535         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5536         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5537         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5538         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5539         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5540         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5541         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5542         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5543         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5544         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5545         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5546         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5547         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5548         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5549         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5550         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5551         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5552         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5553         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5554         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5555         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5556         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5557         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5558         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5559         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5560         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5561         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5562         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5563         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5564         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5565         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5566         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5567         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5568         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5569         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5570         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5571         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5572         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5573         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5574         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5575         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5576         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5577         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5578         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5579         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5580         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5581         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5582         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5583         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5584         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5585         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5586         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5587         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5588         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5589         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5590         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5591         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5592         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5593         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5594         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5595         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5596         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5597         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5598         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5599         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5600         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5601         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5602         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5603         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5604         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5605         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5606         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5607         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5608         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5609         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5610         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5611         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5612         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5613         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5614         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5615         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5616         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5617         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5618         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5619         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5620         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5621         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5622         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5623         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5624         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5625         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5626         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5627         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5628 };
5629
5630 static const u32 tg3TsoFwRodata[] = {
5631         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5632         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5633         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5634         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5635         0x00000000,
5636 };
5637
5638 static const u32 tg3TsoFwData[] = {
5639         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5640         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5641         0x00000000,
5642 };
5643
5644 /* 5705 needs a special version of the TSO firmware.  */
5645 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5646 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5647 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5648 #define TG3_TSO5_FW_START_ADDR          0x00010000
5649 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5650 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5651 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5652 #define TG3_TSO5_FW_RODATA_LEN          0x50
5653 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5654 #define TG3_TSO5_FW_DATA_LEN            0x20
5655 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5656 #define TG3_TSO5_FW_SBSS_LEN            0x28
5657 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5658 #define TG3_TSO5_FW_BSS_LEN             0x88
5659
5660 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5661         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5662         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5663         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5664         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5665         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5666         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5667         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5668         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5669         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5670         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5671         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5672         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5673         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5674         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5675         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5676         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5677         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5678         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5679         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5680         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5681         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5682         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5683         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5684         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5685         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5686         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5687         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5688         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5689         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5690         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5691         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5692         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5693         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5694         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5695         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5696         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5697         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5698         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5699         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5700         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5701         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5702         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5703         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5704         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5705         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5706         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5707         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5708         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5709         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5710         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5711         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5712         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5713         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5714         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5715         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5716         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5717         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5718         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5719         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5720         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5721         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5722         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5723         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5724         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5725         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5726         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5727         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5728         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5729         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5730         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5731         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5732         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5733         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5734         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5735         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5736         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5737         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5738         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5739         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5740         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5741         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5742         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5743         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5744         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5745         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5746         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5747         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5748         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5749         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5750         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5751         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5752         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5753         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5754         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5755         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5756         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5757         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5758         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5759         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5760         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5761         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5762         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5763         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5764         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5765         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5766         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5767         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5768         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5769         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5770         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5771         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5772         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5773         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5774         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5775         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5776         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5777         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5778         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5779         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5780         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5781         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5782         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5783         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5784         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5785         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5786         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5787         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5788         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5789         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5790         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5791         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5792         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5793         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5794         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5795         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5796         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5797         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5798         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5799         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5800         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5801         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5802         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5803         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5804         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5805         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5806         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5807         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5808         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5809         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5810         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5811         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5812         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5813         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5814         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5815         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5816         0x00000000, 0x00000000, 0x00000000,
5817 };
5818
5819 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5820         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5821         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5822         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5823         0x00000000, 0x00000000, 0x00000000,
5824 };
5825
5826 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5827         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5828         0x00000000, 0x00000000, 0x00000000,
5829 };
5830
5831 /* tp->lock is held. */
5832 static int tg3_load_tso_firmware(struct tg3 *tp)
5833 {
5834         struct fw_info info;
5835         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5836         int err, i;
5837
5838         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5839                 return 0;
5840
5841         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5842                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5843                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5844                 info.text_data = &tg3Tso5FwText[0];
5845                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5846                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5847                 info.rodata_data = &tg3Tso5FwRodata[0];
5848                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5849                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5850                 info.data_data = &tg3Tso5FwData[0];
5851                 cpu_base = RX_CPU_BASE;
5852                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
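                /* The 5705 build of the TSO firmware runs on the RX CPU and is
                 * staged at the start of the MBUF pool; the scratch size below
                 * is the full image footprint (text + rodata + data + sbss +
                 * bss), which tg3_reset_hw later reserves out of that pool.
                 */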
5853                 cpu_scratch_size = (info.text_len +
5854                                     info.rodata_len +
5855                                     info.data_len +
5856                                     TG3_TSO5_FW_SBSS_LEN +
5857                                     TG3_TSO5_FW_BSS_LEN);
5858         } else {
5859                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5860                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5861                 info.text_data = &tg3TsoFwText[0];
5862                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5863                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5864                 info.rodata_data = &tg3TsoFwRodata[0];
5865                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5866                 info.data_len = TG3_TSO_FW_DATA_LEN;
5867                 info.data_data = &tg3TsoFwData[0];
5868                 cpu_base = TX_CPU_BASE;
5869                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5870                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5871         }
5872
5873         err = tg3_load_firmware_cpu(tp, cpu_base,
5874                                     cpu_scratch_base, cpu_scratch_size,
5875                                     &info);
5876         if (err)
5877                 return err;
5878
5879         /* Now start up the CPU. */
5880         tw32(cpu_base + CPU_STATE, 0xffffffff);
5881         tw32_f(cpu_base + CPU_PC,    info.text_base);
5882
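        /* Check that the CPU latched the firmware entry point; if not,
         * re-halt it, rewrite the PC and retry up to five times.
         */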
5883         for (i = 0; i < 5; i++) {
5884                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5885                         break;
5886                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5887                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5888                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5889                 udelay(1000);
5890         }
5891         if (i >= 5) {
5892                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5893                        "to set CPU PC, is %08x should be %08x\n",
5894                        tp->dev->name, tr32(cpu_base + CPU_PC),
5895                        info.text_base);
5896                 return -ENODEV;
5897         }
5898         tw32(cpu_base + CPU_STATE, 0xffffffff);
5899         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5900         return 0;
5901 }
5902
5903
5904 /* tp->lock is held. */
5905 static void __tg3_set_mac_addr(struct tg3 *tp)
5906 {
5907         u32 addr_high, addr_low;
5908         int i;
5909
5910         addr_high = ((tp->dev->dev_addr[0] << 8) |
5911                      tp->dev->dev_addr[1]);
5912         addr_low = ((tp->dev->dev_addr[2] << 24) |
5913                     (tp->dev->dev_addr[3] << 16) |
5914                     (tp->dev->dev_addr[4] <<  8) |
5915                     (tp->dev->dev_addr[5] <<  0));
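        /* Illustrative example: for a station address of 00:10:18:aa:bb:cc
         * the registers are written with addr_high = 0x00000010 and
         * addr_low = 0x18aabbcc.
         */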
5916         for (i = 0; i < 4; i++) {
5917                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5918                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5919         }
5920
5921         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5922             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5923                 for (i = 0; i < 12; i++) {
5924                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5925                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5926                 }
5927         }
5928
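        /* Derive the TX backoff seed from the byte sum of the station
         * address (masked with TX_BACKOFF_SEED_MASK), presumably so each
         * NIC randomizes its half-duplex backoff differently.
         */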
5929         addr_high = (tp->dev->dev_addr[0] +
5930                      tp->dev->dev_addr[1] +
5931                      tp->dev->dev_addr[2] +
5932                      tp->dev->dev_addr[3] +
5933                      tp->dev->dev_addr[4] +
5934                      tp->dev->dev_addr[5]) &
5935                 TX_BACKOFF_SEED_MASK;
5936         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5937 }
5938
5939 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5940 {
5941         struct tg3 *tp = netdev_priv(dev);
5942         struct sockaddr *addr = p;
5943         int err = 0;
5944
5945         if (!is_valid_ether_addr(addr->sa_data))
5946                 return -EINVAL;
5947
5948         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5949
5950         if (!netif_running(dev))
5951                 return 0;
5952
5953         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5954                 /* Reset chip so that ASF can re-init any MAC addresses it
5955                  * needs.
5956                  */
5957                 tg3_netif_stop(tp);
5958                 tg3_full_lock(tp, 1);
5959
5960                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5961                 err = tg3_restart_hw(tp, 0);
5962                 if (!err)
5963                         tg3_netif_start(tp);
5964                 tg3_full_unlock(tp);
5965         } else {
5966                 spin_lock_bh(&tp->lock);
5967                 __tg3_set_mac_addr(tp);
5968                 spin_unlock_bh(&tp->lock);
5969         }
5970
5971         return err;
5972 }
5973
5974 /* tp->lock is held. */
5975 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5976                            dma_addr_t mapping, u32 maxlen_flags,
5977                            u32 nic_addr)
5978 {
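        /* A TG3_BDINFO block in NIC SRAM is four 32-bit words: the high and
         * low halves of the ring's host DMA address, a maxlen/flags word,
         * and the ring's NIC SRAM address (not programmed on 5705+ parts).
         */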
5979         tg3_write_mem(tp,
5980                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5981                       ((u64) mapping >> 32));
5982         tg3_write_mem(tp,
5983                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5984                       ((u64) mapping & 0xffffffff));
5985         tg3_write_mem(tp,
5986                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5987                        maxlen_flags);
5988
5989         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5990                 tg3_write_mem(tp,
5991                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5992                               nic_addr);
5993 }
5994
5995 static void __tg3_set_rx_mode(struct net_device *);
5996 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5997 {
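        /* Mirror the ethtool coalescing parameters into the host coalescing
         * engine: tick counts and max-frame counts for each direction, plus
         * the *_irq variants; the during-interrupt tick registers and the
         * statistics-block timer are skipped on 5705+ parts.
         */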
5998         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5999         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6000         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6001         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6002         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6003                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6004                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6005         }
6006         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6007         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6008         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6009                 u32 val = ec->stats_block_coalesce_usecs;
6010
6011                 if (!netif_carrier_ok(tp->dev))
6012                         val = 0;
6013
6014                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6015         }
6016 }
6017
6018 /* tp->lock is held. */
6019 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6020 {
6021         u32 val, rdmac_mode;
6022         int i, err, limit;
6023
6024         tg3_disable_ints(tp);
6025
6026         tg3_stop_fw(tp);
6027
6028         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6029
6030         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6031                 tg3_abort_hw(tp, 1);
6032         }
6033
6034         if (reset_phy)
6035                 tg3_phy_reset(tp);
6036
6037         err = tg3_chip_reset(tp);
6038         if (err)
6039                 return err;
6040
6041         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6042
6043         /* This works around an issue with Athlon chipsets on
6044          * B3 tigon3 silicon.  This bit has no effect on any
6045          * other revision.  But do not set this on PCI Express
6046          * chips.
6047          */
6048         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6049                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6050         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6051
6052         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6053             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6054                 val = tr32(TG3PCI_PCISTATE);
6055                 val |= PCISTATE_RETRY_SAME_DMA;
6056                 tw32(TG3PCI_PCISTATE, val);
6057         }
6058
6059         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6060                 /* Enable some hw fixes.  */
6061                 val = tr32(TG3PCI_MSI_DATA);
6062                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6063                 tw32(TG3PCI_MSI_DATA, val);
6064         }
6065
6066         /* Descriptor ring init may make accesses to the
6067          * NIC SRAM area to set up the TX descriptors, so we
6068          * can only do this after the hardware has been
6069          * successfully reset.
6070          */
6071         err = tg3_init_rings(tp);
6072         if (err)
6073                 return err;
6074
6075         /* This value is determined during the probe time DMA
6076          * engine test, tg3_test_dma.
6077          */
6078         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6079
6080         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6081                           GRC_MODE_4X_NIC_SEND_RINGS |
6082                           GRC_MODE_NO_TX_PHDR_CSUM |
6083                           GRC_MODE_NO_RX_PHDR_CSUM);
6084         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6085
6086         /* Pseudo-header checksum is done by hardware logic and not
6087          * the offload processors, so make the chip do the pseudo-
6088          * header checksums on receive.  For transmit it is more
6089          * convenient to do the pseudo-header checksum in software
6090          * as Linux does that on transmit for us in all cases.
6091          */
6092         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6093
6094         tw32(GRC_MODE,
6095              tp->grc_mode |
6096              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6097
6098         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
6099         val = tr32(GRC_MISC_CFG);
6100         val &= ~0xff;
6101         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6102         tw32(GRC_MISC_CFG, val);
6103
6104         /* Initialize MBUF/DESC pool. */
6105         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6106                 /* Do nothing.  */
6107         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6108                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6109                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6110                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6111                 else
6112                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6113                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6114                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6115         }
6116         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6117                 int fw_len;
6118
6119                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6120                           TG3_TSO5_FW_RODATA_LEN +
6121                           TG3_TSO5_FW_DATA_LEN +
6122                           TG3_TSO5_FW_SBSS_LEN +
6123                           TG3_TSO5_FW_BSS_LEN);
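                /* Round the firmware footprint up to a 128-byte boundary and
                 * carve it out of the front of the 5705 MBUF pool.
                 */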
6124                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6125                 tw32(BUFMGR_MB_POOL_ADDR,
6126                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6127                 tw32(BUFMGR_MB_POOL_SIZE,
6128                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6129         }
6130
6131         if (tp->dev->mtu <= ETH_DATA_LEN) {
6132                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6133                      tp->bufmgr_config.mbuf_read_dma_low_water);
6134                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6135                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6136                 tw32(BUFMGR_MB_HIGH_WATER,
6137                      tp->bufmgr_config.mbuf_high_water);
6138         } else {
6139                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6140                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6141                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6142                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6143                 tw32(BUFMGR_MB_HIGH_WATER,
6144                      tp->bufmgr_config.mbuf_high_water_jumbo);
6145         }
6146         tw32(BUFMGR_DMA_LOW_WATER,
6147              tp->bufmgr_config.dma_low_water);
6148         tw32(BUFMGR_DMA_HIGH_WATER,
6149              tp->bufmgr_config.dma_high_water);
6150
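        /* Enable the buffer manager and poll (up to 2000 x 10 usec) for the
         * enable bit to read back set before going any further.
         */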
6151         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6152         for (i = 0; i < 2000; i++) {
6153                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6154                         break;
6155                 udelay(10);
6156         }
6157         if (i >= 2000) {
6158                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6159                        tp->dev->name);
6160                 return -ENODEV;
6161         }
6162
6163         /* Setup replenish threshold. */
6164         val = tp->rx_pending / 8;
6165         if (val == 0)
6166                 val = 1;
6167         else if (val > tp->rx_std_max_post)
6168                 val = tp->rx_std_max_post;
6169         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6170                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6171                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6172
6173                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6174                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6175         }
6176
6177         tw32(RCVBDI_STD_THRESH, val);
6178
6179         /* Initialize TG3_BDINFO's at:
6180          *  RCVDBDI_STD_BD:     standard eth size rx ring
6181          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6182          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6183          *
6184          * like so:
6185          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6186          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6187          *                              ring attribute flags
6188          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6189          *
6190          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6191          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6192          *
6193          * The size of each ring is fixed in the firmware, but the location is
6194          * configurable.
6195          */
6196         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6197              ((u64) tp->rx_std_mapping >> 32));
6198         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6199              ((u64) tp->rx_std_mapping & 0xffffffff));
6200         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6201              NIC_SRAM_RX_BUFFER_DESC);
6202
6203         /* Don't even try to program the JUMBO/MINI buffer descriptor
6204          * configs on 5705.
6205          */
6206         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6207                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6208                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6209         } else {
6210                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6211                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6212
6213                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6214                      BDINFO_FLAGS_DISABLED);
6215
6216                 /* Setup replenish threshold. */
6217                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6218
6219                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6220                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6221                              ((u64) tp->rx_jumbo_mapping >> 32));
6222                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6223                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6224                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6225                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6226                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6227                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6228                 } else {
6229                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6230                              BDINFO_FLAGS_DISABLED);
6231                 }
6232
6233         }
6234
6235         /* There is only one send ring on 5705/5750, no need to explicitly
6236          * disable the others.
6237          */
6238         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6239                 /* Clear out send RCB ring in SRAM. */
6240                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6241                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6242                                       BDINFO_FLAGS_DISABLED);
6243         }
6244
6245         tp->tx_prod = 0;
6246         tp->tx_cons = 0;
6247         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6248         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6249
6250         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6251                        tp->tx_desc_mapping,
6252                        (TG3_TX_RING_SIZE <<
6253                         BDINFO_FLAGS_MAXLEN_SHIFT),
6254                        NIC_SRAM_TX_BUFFER_DESC);
6255
6256         /* There is only one receive return ring on 5705/5750, no need
6257          * to explicitly disable the others.
6258          */
6259         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6260                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6261                      i += TG3_BDINFO_SIZE) {
6262                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6263                                       BDINFO_FLAGS_DISABLED);
6264                 }
6265         }
6266
6267         tp->rx_rcb_ptr = 0;
6268         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6269
6270         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6271                        tp->rx_rcb_mapping,
6272                        (TG3_RX_RCB_RING_SIZE(tp) <<
6273                         BDINFO_FLAGS_MAXLEN_SHIFT),
6274                        0);
6275
6276         tp->rx_std_ptr = tp->rx_pending;
6277         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6278                      tp->rx_std_ptr);
6279
6280         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6281                                                 tp->rx_jumbo_pending : 0;
6282         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6283                      tp->rx_jumbo_ptr);
6284
6285         /* Initialize MAC address and backoff seed. */
6286         __tg3_set_mac_addr(tp);
6287
6288         /* MTU + ethernet header + FCS + optional VLAN tag */
6289         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6290
6291         /* The slot time is changed by tg3_setup_phy if we
6292          * run at gigabit with half duplex.
6293          */
6294         tw32(MAC_TX_LENGTHS,
6295              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6296              (6 << TX_LENGTHS_IPG_SHIFT) |
6297              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6298
6299         /* Receive rules. */
6300         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6301         tw32(RCVLPC_CONFIG, 0x0181);
6302
6303         /* Calculate RDMAC_MODE setting early, we need it to determine
6304          * the RCVLPC_STATE_ENABLE mask.
6305          */
6306         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6307                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6308                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6309                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6310                       RDMAC_MODE_LNGREAD_ENAB);
6311         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6312                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6313
6314         /* If statement applies to 5705 and 5750 PCI devices only */
6315         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6316              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6317             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6318                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6319                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6320                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6321                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6322                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6323                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6324                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6325                 }
6326         }
6327
6328         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6329                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6330
6331         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6332                 rdmac_mode |= (1 << 27);
6333
6334         /* Receive/send statistics. */
6335         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6336                 val = tr32(RCVLPC_STATS_ENABLE);
6337                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6338                 tw32(RCVLPC_STATS_ENABLE, val);
6339         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6340                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6341                 val = tr32(RCVLPC_STATS_ENABLE);
6342                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6343                 tw32(RCVLPC_STATS_ENABLE, val);
6344         } else {
6345                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6346         }
6347         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6348         tw32(SNDDATAI_STATSENAB, 0xffffff);
6349         tw32(SNDDATAI_STATSCTRL,
6350              (SNDDATAI_SCTRL_ENABLE |
6351               SNDDATAI_SCTRL_FASTUPD));
6352
6353         /* Setup host coalescing engine. */
6354         tw32(HOSTCC_MODE, 0);
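             /* Writing 0 clears HOSTCC_MODE_ENABLE; wait up to 20 ms
              * (2000 x 10 us) for the hardware to report the engine
              * disabled before reprogramming it below.
              */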
6355         for (i = 0; i < 2000; i++) {
6356                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6357                         break;
6358                 udelay(10);
6359         }
6360
6361         __tg3_set_coalesce(tp, &tp->coal);
6362
6363         /* set status block DMA address */
6364         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6365              ((u64) tp->status_mapping >> 32));
6366         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6367              ((u64) tp->status_mapping & 0xffffffff));
6368
6369         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6370                 /* Status/statistics block address.  See tg3_timer,
6371                  * the tg3_periodic_fetch_stats call there, and
6372                  * tg3_get_stats to see how this works for 5705/5750 chips.
6373                  */
6374                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6375                      ((u64) tp->stats_mapping >> 32));
6376                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6377                      ((u64) tp->stats_mapping & 0xffffffff));
6378                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6379                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6380         }
6381
6382         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6383
6384         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6385         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6386         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6387                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6388
6389         /* Clear statistics/status block in chip, and status block in ram. */
6390         for (i = NIC_SRAM_STATS_BLK;
6391              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6392              i += sizeof(u32)) {
6393                 tg3_write_mem(tp, i, 0);
6394                 udelay(40);
6395         }
6396         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6397
6398         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6399                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6400                 /* Reset to prevent intermittently losing the first rx packet. */
6401                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6402                 udelay(10);
6403         }
6404
6405         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6406                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6407         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6408         udelay(40);
6409
6410         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6411          * If TG3_FLG2_IS_NIC is zero, we should read the
6412          * register to preserve the GPIO settings for LOMs. The GPIOs,
6413          * whether used as inputs or outputs, are set by boot code after
6414          * reset.
6415          */
6416         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6417                 u32 gpio_mask;
6418
6419                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6420                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6421                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6422
6423                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6424                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6425                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6426
6427                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6428                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6429
6430                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6431
6432                 /* GPIO1 must be driven high for EEPROM write protect */
6433                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6434                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6435                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6436         }
6437         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6438         udelay(100);
6439
6440         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6441         tp->last_tag = 0;
6442
6443         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6444                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6445                 udelay(40);
6446         }
6447
6448         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6449                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6450                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6451                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6452                WDMAC_MODE_LNGREAD_ENAB);
6453
6454         /* If statement applies to 5705 and 5750 PCI devices only */
6455         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6456              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6457             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6458                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6459                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6460                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6461                         /* nothing */
6462                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6463                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6464                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6465                         val |= WDMAC_MODE_RX_ACCEL;
6466                 }
6467         }
6468
6469         /* Enable host coalescing bug fix */
6470         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6471             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6472                 val |= (1 << 29);
6473
6474         tw32_f(WDMAC_MODE, val);
6475         udelay(40);
6476
6477         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6478                 val = tr32(TG3PCI_X_CAPS);
6479                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6480                         val &= ~PCIX_CAPS_BURST_MASK;
6481                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6482                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6483                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6484                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6485                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6486                                 val |= (tp->split_mode_max_reqs <<
6487                                         PCIX_CAPS_SPLIT_SHIFT);
6488                 }
6489                 tw32(TG3PCI_X_CAPS, val);
6490         }
6491
6492         tw32_f(RDMAC_MODE, rdmac_mode);
6493         udelay(40);
6494
6495         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6496         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6497                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6498         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6499         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6500         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6501         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6502         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6503         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6504                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6505         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6506         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6507
6508         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6509                 err = tg3_load_5701_a0_firmware_fix(tp);
6510                 if (err)
6511                         return err;
6512         }
6513
6514         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6515                 err = tg3_load_tso_firmware(tp);
6516                 if (err)
6517                         return err;
6518         }
6519
6520         tp->tx_mode = TX_MODE_ENABLE;
6521         tw32_f(MAC_TX_MODE, tp->tx_mode);
6522         udelay(100);
6523
6524         tp->rx_mode = RX_MODE_ENABLE;
6525         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6526                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6527
6528         tw32_f(MAC_RX_MODE, tp->rx_mode);
6529         udelay(10);
6530
6531         if (tp->link_config.phy_is_low_power) {
6532                 tp->link_config.phy_is_low_power = 0;
6533                 tp->link_config.speed = tp->link_config.orig_speed;
6534                 tp->link_config.duplex = tp->link_config.orig_duplex;
6535                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6536         }
6537
6538         tp->mi_mode = MAC_MI_MODE_BASE;
6539         tw32_f(MAC_MI_MODE, tp->mi_mode);
6540         udelay(80);
6541
6542         tw32(MAC_LED_CTRL, tp->led_ctrl);
6543
6544         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6545         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6546                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6547                 udelay(10);
6548         }
6549         tw32_f(MAC_RX_MODE, tp->rx_mode);
6550         udelay(10);
6551
6552         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6553                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6554                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6555                         /* Set drive transmission level to 1.2V only if
6556                          * the signal pre-emphasis bit is not set. */
6557                         val = tr32(MAC_SERDES_CFG);
6558                         val &= 0xfffff000;
6559                         val |= 0x880;
6560                         tw32(MAC_SERDES_CFG, val);
6561                 }
6562                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6563                         tw32(MAC_SERDES_CFG, 0x616000);
6564         }
6565
6566         /* Prevent chip from dropping frames when flow control
6567          * is enabled.
6568          */
6569         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6570
6571         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6572             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6573                 /* Use hardware link auto-negotiation */
6574                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6575         }
6576
6577         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6578             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6579                 u32 tmp;
6580
6581                 tmp = tr32(SERDES_RX_CTRL);
6582                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6583                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6584                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6585                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6586         }
6587
6588         err = tg3_setup_phy(tp, 0);
6589         if (err)
6590                 return err;
6591
6592         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6593             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6594                 u32 tmp;
6595
6596                 /* Clear CRC stats. */
6597                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6598                         tg3_writephy(tp, MII_TG3_TEST1,
6599                                      tmp | MII_TG3_TEST1_CRC_EN);
6600                         tg3_readphy(tp, 0x14, &tmp);
6601                 }
6602         }
6603
6604         __tg3_set_rx_mode(tp->dev);
6605
6606         /* Initialize receive rules. */
6607         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6608         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6609         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6610         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6611
6612         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6613             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6614                 limit = 8;
6615         else
6616                 limit = 16;
6617         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6618                 limit -= 4;
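             /* Zero the unused receive rules.  Each case below falls
              * through on purpose, so entering at 'limit' clears every
              * rule from limit-1 downward; rules 0 and 1 were programmed
              * above, and with ASF enabled the top four rules are left
              * untouched (presumably reserved for the firmware).
              */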
6619         switch (limit) {
6620         case 16:
6621                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6622         case 15:
6623                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6624         case 14:
6625                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6626         case 13:
6627                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6628         case 12:
6629                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6630         case 11:
6631                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6632         case 10:
6633                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6634         case 9:
6635                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6636         case 8:
6637                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6638         case 7:
6639                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6640         case 6:
6641                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6642         case 5:
6643                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6644         case 4:
6645                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6646         case 3:
6647                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6648         case 2:
6649         case 1:
6650
6651         default:
6652                 break;
6653         }
6654
6655         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6656
6657         return 0;
6658 }
6659
6660 /* Called at device open time to get the chip ready for
6661  * packet processing.  Invoked with tp->lock held.
6662  */
6663 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6664 {
6665         int err;
6666
6667         /* Force the chip into D0. */
6668         err = tg3_set_power_state(tp, PCI_D0);
6669         if (err)
6670                 goto out;
6671
6672         tg3_switch_clocks(tp);
6673
6674         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6675
6676         err = tg3_reset_hw(tp, reset_phy);
6677
6678 out:
6679         return err;
6680 }
6681
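     /* Accumulate a 32-bit hardware counter into a 64-bit software
      * counter.  If the low word becomes smaller than the value just
      * added, it wrapped, so carry one into the high word.
      */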
6682 #define TG3_STAT_ADD32(PSTAT, REG) \
6683 do {    u32 __val = tr32(REG); \
6684         (PSTAT)->low += __val; \
6685         if ((PSTAT)->low < __val) \
6686                 (PSTAT)->high += 1; \
6687 } while (0)
6688
6689 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6690 {
6691         struct tg3_hw_stats *sp = tp->hw_stats;
6692
6693         if (!netif_carrier_ok(tp->dev))
6694                 return;
6695
6696         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6697         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6698         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6699         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6700         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6701         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6702         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6703         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6704         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6705         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6706         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6707         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6708         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6709
6710         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6711         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6712         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6713         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6714         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6715         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6716         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6717         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6718         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6719         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6720         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6721         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6722         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6723         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6724
6725         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6726         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6727         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
6728 }
6729
6730 static void tg3_timer(unsigned long __opaque)
6731 {
6732         struct tg3 *tp = (struct tg3 *) __opaque;
6733
6734         if (tp->irq_sync)
6735                 goto restart_timer;
6736
6737         spin_lock(&tp->lock);
6738
6739         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6740                 /* All of this garbage exists because, when using non-tagged
6741                  * IRQ status, the mailbox/status_block protocol the chip
6742                  * uses with the CPU is race prone.
6743                  */
6744                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6745                         tw32(GRC_LOCAL_CTRL,
6746                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6747                 } else {
6748                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6749                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6750                 }
6751
6752                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6753                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6754                         spin_unlock(&tp->lock);
6755                         schedule_work(&tp->reset_task);
6756                         return;
6757                 }
6758         }
6759
6760         /* This part only runs once per second. */
6761         if (!--tp->timer_counter) {
6762                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6763                         tg3_periodic_fetch_stats(tp);
6764
6765                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6766                         u32 mac_stat;
6767                         int phy_event;
6768
6769                         mac_stat = tr32(MAC_STATUS);
6770
6771                         phy_event = 0;
6772                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6773                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6774                                         phy_event = 1;
6775                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6776                                 phy_event = 1;
6777
6778                         if (phy_event)
6779                                 tg3_setup_phy(tp, 0);
6780                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6781                         u32 mac_stat = tr32(MAC_STATUS);
6782                         int need_setup = 0;
6783
6784                         if (netif_carrier_ok(tp->dev) &&
6785                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6786                                 need_setup = 1;
6787                         }
6788                 if (!netif_carrier_ok(tp->dev) &&
6789                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6790                                          MAC_STATUS_SIGNAL_DET))) {
6791                                 need_setup = 1;
6792                         }
6793                         if (need_setup) {
6794                                 if (!tp->serdes_counter) {
6795                                         tw32_f(MAC_MODE,
6796                                              (tp->mac_mode &
6797                                               ~MAC_MODE_PORT_MODE_MASK));
6798                                         udelay(40);
6799                                         tw32_f(MAC_MODE, tp->mac_mode);
6800                                         udelay(40);
6801                                 }
6802                                 tg3_setup_phy(tp, 0);
6803                         }
6804                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6805                         tg3_serdes_parallel_detect(tp);
6806
6807                 tp->timer_counter = tp->timer_multiplier;
6808         }
6809
6810         /* Heartbeat is only sent once every 2 seconds.
6811          *
6812          * The heartbeat is to tell the ASF firmware that the host
6813          * driver is still alive.  In the event that the OS crashes,
6814          * ASF needs to reset the hardware to free up the FIFO space
6815          * that may be filled with rx packets destined for the host.
6816          * If the FIFO is full, ASF will no longer function properly.
6817          *
6818          * Unintended resets have been reported on real time kernels
6819          * where the timer doesn't run on time.  Netpoll will also have
6820          * the same problem.
6821          *
6822          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
6823          * to check the ring condition when the heartbeat is expiring
6824          * before doing the reset.  This will prevent most unintended
6825          * resets.
6826          */
6827         if (!--tp->asf_counter) {
6828                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6829                         u32 val;
6830
6831                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6832                                       FWCMD_NICDRV_ALIVE3);
6833                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6834                         /* 5 seconds timeout */
6835                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6836                         val = tr32(GRC_RX_CPU_EVENT);
6837                         val |= (1 << 14);
6838                         tw32(GRC_RX_CPU_EVENT, val);
6839                 }
6840                 tp->asf_counter = tp->asf_multiplier;
6841         }
6842
6843         spin_unlock(&tp->lock);
6844
6845 restart_timer:
6846         tp->timer.expires = jiffies + tp->timer_offset;
6847         add_timer(&tp->timer);
6848 }
6849
6850 static int tg3_request_irq(struct tg3 *tp)
6851 {
6852         irq_handler_t fn;
6853         unsigned long flags;
6854         struct net_device *dev = tp->dev;
6855
6856         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6857                 fn = tg3_msi;
6858                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6859                         fn = tg3_msi_1shot;
6860                 flags = IRQF_SAMPLE_RANDOM;
6861         } else {
6862                 fn = tg3_interrupt;
6863                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6864                         fn = tg3_interrupt_tagged;
6865                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6866         }
6867         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
6868 }
6869
6870 static int tg3_test_interrupt(struct tg3 *tp)
6871 {
6872         struct net_device *dev = tp->dev;
6873         int err, i, intr_ok = 0;
6874
6875         if (!netif_running(dev))
6876                 return -ENODEV;
6877
6878         tg3_disable_ints(tp);
6879
6880         free_irq(tp->pdev->irq, dev);
6881
6882         err = request_irq(tp->pdev->irq, tg3_test_isr,
6883                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6884         if (err)
6885                 return err;
6886
6887         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6888         tg3_enable_ints(tp);
6889
6890         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6891                HOSTCC_MODE_NOW);
6892
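             /* Poll for up to ~50 ms; a non-zero interrupt mailbox or a
              * set MISC_HOST_CTRL_MASK_PCI_INT bit indicates the test
              * interrupt was delivered and serviced.
              */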
6893         for (i = 0; i < 5; i++) {
6894                 u32 int_mbox, misc_host_ctrl;
6895
6896                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6897                                         TG3_64BIT_REG_LOW);
6898                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
6899
6900                 if ((int_mbox != 0) ||
6901                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
6902                         intr_ok = 1;
6903                         break;
6904                 }
6905
6906                 msleep(10);
6907         }
6908
6909         tg3_disable_ints(tp);
6910
6911         free_irq(tp->pdev->irq, dev);
6912
6913         err = tg3_request_irq(tp);
6914
6915         if (err)
6916                 return err;
6917
6918         if (intr_ok)
6919                 return 0;
6920
6921         return -EIO;
6922 }
6923
6924 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6925  * INTx mode is successfully restored
6926  */
6927 static int tg3_test_msi(struct tg3 *tp)
6928 {
6929         struct net_device *dev = tp->dev;
6930         int err;
6931         u16 pci_cmd;
6932
6933         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6934                 return 0;
6935
6936         /* Turn off SERR reporting in case MSI terminates with Master
6937          * Abort.
6938          */
6939         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6940         pci_write_config_word(tp->pdev, PCI_COMMAND,
6941                               pci_cmd & ~PCI_COMMAND_SERR);
6942
6943         err = tg3_test_interrupt(tp);
6944
6945         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6946
6947         if (!err)
6948                 return 0;
6949
6950         /* other failures */
6951         if (err != -EIO)
6952                 return err;
6953
6954         /* MSI test failed, go back to INTx mode */
6955         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6956                "switching to INTx mode. Please report this failure to "
6957                "the PCI maintainer and include system chipset information.\n",
6958                        tp->dev->name);
6959
6960         free_irq(tp->pdev->irq, dev);
6961         pci_disable_msi(tp->pdev);
6962
6963         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6964
6965         err = tg3_request_irq(tp);
6966         if (err)
6967                 return err;
6968
6969         /* Need to reset the chip because the MSI cycle may have terminated
6970          * with Master Abort.
6971          */
6972         tg3_full_lock(tp, 1);
6973
6974         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6975         err = tg3_init_hw(tp, 1);
6976
6977         tg3_full_unlock(tp);
6978
6979         if (err)
6980                 free_irq(tp->pdev->irq, dev);
6981
6982         return err;
6983 }
6984
6985 static int tg3_open(struct net_device *dev)
6986 {
6987         struct tg3 *tp = netdev_priv(dev);
6988         int err;
6989
6990         netif_carrier_off(tp->dev);
6991
6992         tg3_full_lock(tp, 0);
6993
6994         err = tg3_set_power_state(tp, PCI_D0);
6995         if (err) {
6996                 tg3_full_unlock(tp);
6997                 return err;
6998         }
6999
7000         tg3_disable_ints(tp);
7001         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7002
7003         tg3_full_unlock(tp);
7004
7005         /* The placement of this call is tied
7006          * to the setup and use of Host TX descriptors.
7007          */
7008         err = tg3_alloc_consistent(tp);
7009         if (err)
7010                 return err;
7011
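         /* Try MSI only on 5750-and-newer parts, excluding the 5750 A
          * and B steppings and 5714 devices where tp->pdev_peer is the
          * device itself.
          */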
7012         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
7013             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
7014             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
7015             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
7016               (tp->pdev_peer == tp->pdev))) {
7017                 /* All MSI-supporting chips should support tagged
7018                  * status.  Assert that this is the case.
7019                  */
7020                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7021                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7022                                "Not using MSI.\n", tp->dev->name);
7023                 } else if (pci_enable_msi(tp->pdev) == 0) {
7024                         u32 msi_mode;
7025
7026                         msi_mode = tr32(MSGINT_MODE);
7027                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7028                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7029                 }
7030         }
7031         err = tg3_request_irq(tp);
7032
7033         if (err) {
7034                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7035                         pci_disable_msi(tp->pdev);
7036                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7037                 }
7038                 tg3_free_consistent(tp);
7039                 return err;
7040         }
7041
7042         tg3_full_lock(tp, 0);
7043
7044         err = tg3_init_hw(tp, 1);
7045         if (err) {
7046                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7047                 tg3_free_rings(tp);
7048         } else {
7049                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7050                         tp->timer_offset = HZ;
7051                 else
7052                         tp->timer_offset = HZ / 10;
7053
7054                 BUG_ON(tp->timer_offset > HZ);
7055                 tp->timer_counter = tp->timer_multiplier =
7056                         (HZ / tp->timer_offset);
7057                 tp->asf_counter = tp->asf_multiplier =
7058                         ((HZ / tp->timer_offset) * 2);
7059
7060                 init_timer(&tp->timer);
7061                 tp->timer.expires = jiffies + tp->timer_offset;
7062                 tp->timer.data = (unsigned long) tp;
7063                 tp->timer.function = tg3_timer;
7064         }
7065
7066         tg3_full_unlock(tp);
7067
7068         if (err) {
7069                 free_irq(tp->pdev->irq, dev);
7070                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7071                         pci_disable_msi(tp->pdev);
7072                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7073                 }
7074                 tg3_free_consistent(tp);
7075                 return err;
7076         }
7077
7078         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7079                 err = tg3_test_msi(tp);
7080
7081                 if (err) {
7082                         tg3_full_lock(tp, 0);
7083
7084                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7085                                 pci_disable_msi(tp->pdev);
7086                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7087                         }
7088                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7089                         tg3_free_rings(tp);
7090                         tg3_free_consistent(tp);
7091
7092                         tg3_full_unlock(tp);
7093
7094                         return err;
7095                 }
7096
7097                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7098                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7099                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
7100
7101                                 tw32(PCIE_TRANSACTION_CFG,
7102                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
7103                         }
7104                 }
7105         }
7106
7107         tg3_full_lock(tp, 0);
7108
7109         add_timer(&tp->timer);
7110         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7111         tg3_enable_ints(tp);
7112
7113         tg3_full_unlock(tp);
7114
7115         netif_start_queue(dev);
7116
7117         return 0;
7118 }
7119
7120 #if 0
7121 /*static*/ void tg3_dump_state(struct tg3 *tp)
7122 {
7123         u32 val32, val32_2, val32_3, val32_4, val32_5;
7124         u16 val16;
7125         int i;
7126
7127         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7128         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7129         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7130                val16, val32);
7131
7132         /* MAC block */
7133         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7134                tr32(MAC_MODE), tr32(MAC_STATUS));
7135         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7136                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7137         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7138                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7139         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7140                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7141
7142         /* Send data initiator control block */
7143         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7144                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7145         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7146                tr32(SNDDATAI_STATSCTRL));
7147
7148         /* Send data completion control block */
7149         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7150
7151         /* Send BD ring selector block */
7152         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7153                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7154
7155         /* Send BD initiator control block */
7156         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7157                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7158
7159         /* Send BD completion control block */
7160         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7161
7162         /* Receive list placement control block */
7163         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7164                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7165         printk("       RCVLPC_STATSCTRL[%08x]\n",
7166                tr32(RCVLPC_STATSCTRL));
7167
7168         /* Receive data and receive BD initiator control block */
7169         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7170                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7171
7172         /* Receive data completion control block */
7173         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7174                tr32(RCVDCC_MODE));
7175
7176         /* Receive BD initiator control block */
7177         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7178                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7179
7180         /* Receive BD completion control block */
7181         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7182                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7183
7184         /* Receive list selector control block */
7185         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7186                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7187
7188         /* Mbuf cluster free block */
7189         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7190                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7191
7192         /* Host coalescing control block */
7193         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7194                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7195         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7196                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7197                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7198         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7199                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7200                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7201         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7202                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7203         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7204                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7205
7206         /* Memory arbiter control block */
7207         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7208                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7209
7210         /* Buffer manager control block */
7211         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7212                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7213         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7214                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7215         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7216                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7217                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7218                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7219
7220         /* Read DMA control block */
7221         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7222                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7223
7224         /* Write DMA control block */
7225         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7226                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7227
7228         /* DMA completion block */
7229         printk("DEBUG: DMAC_MODE[%08x]\n",
7230                tr32(DMAC_MODE));
7231
7232         /* GRC block */
7233         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7234                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7235         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7236                tr32(GRC_LOCAL_CTRL));
7237
7238         /* TG3_BDINFOs */
7239         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7240                tr32(RCVDBDI_JUMBO_BD + 0x0),
7241                tr32(RCVDBDI_JUMBO_BD + 0x4),
7242                tr32(RCVDBDI_JUMBO_BD + 0x8),
7243                tr32(RCVDBDI_JUMBO_BD + 0xc));
7244         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7245                tr32(RCVDBDI_STD_BD + 0x0),
7246                tr32(RCVDBDI_STD_BD + 0x4),
7247                tr32(RCVDBDI_STD_BD + 0x8),
7248                tr32(RCVDBDI_STD_BD + 0xc));
7249         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7250                tr32(RCVDBDI_MINI_BD + 0x0),
7251                tr32(RCVDBDI_MINI_BD + 0x4),
7252                tr32(RCVDBDI_MINI_BD + 0x8),
7253                tr32(RCVDBDI_MINI_BD + 0xc));
7254
7255         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7256         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7257         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7258         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7259         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7260                val32, val32_2, val32_3, val32_4);
7261
7262         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7263         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7264         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7265         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7266         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7267                val32, val32_2, val32_3, val32_4);
7268
7269         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7270         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7271         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7272         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7273         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7274         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7275                val32, val32_2, val32_3, val32_4, val32_5);
7276
7277         /* SW status block */
7278         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7279                tp->hw_status->status,
7280                tp->hw_status->status_tag,
7281                tp->hw_status->rx_jumbo_consumer,
7282                tp->hw_status->rx_consumer,
7283                tp->hw_status->rx_mini_consumer,
7284                tp->hw_status->idx[0].rx_producer,
7285                tp->hw_status->idx[0].tx_consumer);
7286
7287         /* SW statistics block */
7288         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7289                ((u32 *)tp->hw_stats)[0],
7290                ((u32 *)tp->hw_stats)[1],
7291                ((u32 *)tp->hw_stats)[2],
7292                ((u32 *)tp->hw_stats)[3]);
7293
7294         /* Mailboxes */
7295         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7296                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7297                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7298                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7299                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7300
7301         /* NIC side send descriptors. */
7302         for (i = 0; i < 6; i++) {
7303                 unsigned long txd;
7304
7305                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7306                         + (i * sizeof(struct tg3_tx_buffer_desc));
7307                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7308                        i,
7309                        readl(txd + 0x0), readl(txd + 0x4),
7310                        readl(txd + 0x8), readl(txd + 0xc));
7311         }
7312
7313         /* NIC side RX descriptors. */
7314         for (i = 0; i < 6; i++) {
7315                 unsigned long rxd;
7316
7317                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7318                         + (i * sizeof(struct tg3_rx_buffer_desc));
7319                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7320                        i,
7321                        readl(rxd + 0x0), readl(rxd + 0x4),
7322                        readl(rxd + 0x8), readl(rxd + 0xc));
7323                 rxd += (4 * sizeof(u32));
7324                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7325                        i,
7326                        readl(rxd + 0x0), readl(rxd + 0x4),
7327                        readl(rxd + 0x8), readl(rxd + 0xc));
7328         }
7329
7330         for (i = 0; i < 6; i++) {
7331                 unsigned long rxd;
7332
7333                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7334                         + (i * sizeof(struct tg3_rx_buffer_desc));
7335                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7336                        i,
7337                        readl(rxd + 0x0), readl(rxd + 0x4),
7338                        readl(rxd + 0x8), readl(rxd + 0xc));
7339                 rxd += (4 * sizeof(u32));
7340                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7341                        i,
7342                        readl(rxd + 0x0), readl(rxd + 0x4),
7343                        readl(rxd + 0x8), readl(rxd + 0xc));
7344         }
7345 }
7346 #endif
7347
7348 static struct net_device_stats *tg3_get_stats(struct net_device *);
7349 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7350
7351 static int tg3_close(struct net_device *dev)
7352 {
7353         struct tg3 *tp = netdev_priv(dev);
7354
7355         /* Calling flush_scheduled_work() may deadlock because
7356          * linkwatch_event() may be on the workqueue and it will try to get
7357          * the rtnl_lock, which we are holding.
7358          */
7359         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7360                 msleep(1);
7361
7362         netif_stop_queue(dev);
7363
7364         del_timer_sync(&tp->timer);
7365
7366         tg3_full_lock(tp, 1);
7367 #if 0
7368         tg3_dump_state(tp);
7369 #endif
7370
7371         tg3_disable_ints(tp);
7372
7373         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7374         tg3_free_rings(tp);
7375         tp->tg3_flags &=
7376                 ~(TG3_FLAG_INIT_COMPLETE |
7377                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7378
7379         tg3_full_unlock(tp);
7380
7381         free_irq(tp->pdev->irq, dev);
7382         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7383                 pci_disable_msi(tp->pdev);
7384                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7385         }
7386
7387         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7388                sizeof(tp->net_stats_prev));
7389         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7390                sizeof(tp->estats_prev));
7391
7392         tg3_free_consistent(tp);
7393
7394         tg3_set_power_state(tp, PCI_D3hot);
7395
7396         netif_carrier_off(tp->dev);
7397
7398         return 0;
7399 }
7400
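     /* Fold a 64-bit hardware counter into an unsigned long.  On 32-bit
      * hosts only the low 32 bits fit; 64-bit hosts return the full value.
      */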
7401 static inline unsigned long get_stat64(tg3_stat64_t *val)
7402 {
7403         unsigned long ret;
7404
7405 #if (BITS_PER_LONG == 32)
7406         ret = val->low;
7407 #else
7408         ret = ((u64)val->high << 32) | ((u64)val->low);
7409 #endif
7410         return ret;
7411 }
7412
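     /* 5700/5701 copper devices count CRC errors in a PHY register
      * (enabled via MII_TG3_TEST1_CRC_EN and read back from register
      * 0x14) rather than in the MAC rx_fcs_errors statistic, so that
      * counter is accumulated here instead.
      */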
7413 static unsigned long calc_crc_errors(struct tg3 *tp)
7414 {
7415         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7416
7417         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7418             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7419              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7420                 u32 val;
7421
7422                 spin_lock_bh(&tp->lock);
7423                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
7424                         tg3_writephy(tp, MII_TG3_TEST1,
7425                                      val | MII_TG3_TEST1_CRC_EN);
7426                         tg3_readphy(tp, 0x14, &val);
7427                 } else
7428                         val = 0;
7429                 spin_unlock_bh(&tp->lock);
7430
7431                 tp->phy_crc_errors += val;
7432
7433                 return tp->phy_crc_errors;
7434         }
7435
7436         return get_stat64(&hw_stats->rx_fcs_errors);
7437 }
7438
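     /* Statistics are reported as the snapshot saved at the last close
      * (estats_prev / net_stats_prev, see tg3_close) plus the live
      * hardware counters, so totals persist across ifdown/ifup cycles.
      */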
7439 #define ESTAT_ADD(member) \
7440         estats->member =        old_estats->member + \
7441                                 get_stat64(&hw_stats->member)
7442
7443 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7444 {
7445         struct tg3_ethtool_stats *estats = &tp->estats;
7446         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7447         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7448
7449         if (!hw_stats)
7450                 return old_estats;
7451
7452         ESTAT_ADD(rx_octets);
7453         ESTAT_ADD(rx_fragments);
7454         ESTAT_ADD(rx_ucast_packets);
7455         ESTAT_ADD(rx_mcast_packets);
7456         ESTAT_ADD(rx_bcast_packets);
7457         ESTAT_ADD(rx_fcs_errors);
7458         ESTAT_ADD(rx_align_errors);
7459         ESTAT_ADD(rx_xon_pause_rcvd);
7460         ESTAT_ADD(rx_xoff_pause_rcvd);
7461         ESTAT_ADD(rx_mac_ctrl_rcvd);
7462         ESTAT_ADD(rx_xoff_entered);
7463         ESTAT_ADD(rx_frame_too_long_errors);
7464         ESTAT_ADD(rx_jabbers);
7465         ESTAT_ADD(rx_undersize_packets);
7466         ESTAT_ADD(rx_in_length_errors);
7467         ESTAT_ADD(rx_out_length_errors);
7468         ESTAT_ADD(rx_64_or_less_octet_packets);
7469         ESTAT_ADD(rx_65_to_127_octet_packets);
7470         ESTAT_ADD(rx_128_to_255_octet_packets);
7471         ESTAT_ADD(rx_256_to_511_octet_packets);
7472         ESTAT_ADD(rx_512_to_1023_octet_packets);
7473         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7474         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7475         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7476         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7477         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7478
7479         ESTAT_ADD(tx_octets);
7480         ESTAT_ADD(tx_collisions);
7481         ESTAT_ADD(tx_xon_sent);
7482         ESTAT_ADD(tx_xoff_sent);
7483         ESTAT_ADD(tx_flow_control);
7484         ESTAT_ADD(tx_mac_errors);
7485         ESTAT_ADD(tx_single_collisions);
7486         ESTAT_ADD(tx_mult_collisions);
7487         ESTAT_ADD(tx_deferred);
7488         ESTAT_ADD(tx_excessive_collisions);
7489         ESTAT_ADD(tx_late_collisions);
7490         ESTAT_ADD(tx_collide_2times);
7491         ESTAT_ADD(tx_collide_3times);
7492         ESTAT_ADD(tx_collide_4times);
7493         ESTAT_ADD(tx_collide_5times);
7494         ESTAT_ADD(tx_collide_6times);
7495         ESTAT_ADD(tx_collide_7times);
7496         ESTAT_ADD(tx_collide_8times);
7497         ESTAT_ADD(tx_collide_9times);
7498         ESTAT_ADD(tx_collide_10times);
7499         ESTAT_ADD(tx_collide_11times);
7500         ESTAT_ADD(tx_collide_12times);
7501         ESTAT_ADD(tx_collide_13times);
7502         ESTAT_ADD(tx_collide_14times);
7503         ESTAT_ADD(tx_collide_15times);
7504         ESTAT_ADD(tx_ucast_packets);
7505         ESTAT_ADD(tx_mcast_packets);
7506         ESTAT_ADD(tx_bcast_packets);
7507         ESTAT_ADD(tx_carrier_sense_errors);
7508         ESTAT_ADD(tx_discards);
7509         ESTAT_ADD(tx_errors);
7510
7511         ESTAT_ADD(dma_writeq_full);
7512         ESTAT_ADD(dma_write_prioq_full);
7513         ESTAT_ADD(rxbds_empty);
7514         ESTAT_ADD(rx_discards);
7515         ESTAT_ADD(rx_errors);
7516         ESTAT_ADD(rx_threshold_hit);
7517
7518         ESTAT_ADD(dma_readq_full);
7519         ESTAT_ADD(dma_read_prioq_full);
7520         ESTAT_ADD(tx_comp_queue_full);
7521
7522         ESTAT_ADD(ring_set_send_prod_index);
7523         ESTAT_ADD(ring_status_update);
7524         ESTAT_ADD(nic_irqs);
7525         ESTAT_ADD(nic_avoided_irqs);
7526         ESTAT_ADD(nic_tx_threshold_hit);
7527
7528         return estats;
7529 }
7530
7531 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7532 {
7533         struct tg3 *tp = netdev_priv(dev);
7534         struct net_device_stats *stats = &tp->net_stats;
7535         struct net_device_stats *old_stats = &tp->net_stats_prev;
7536         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7537
7538         if (!hw_stats)
7539                 return old_stats;
7540
7541         stats->rx_packets = old_stats->rx_packets +
7542                 get_stat64(&hw_stats->rx_ucast_packets) +
7543                 get_stat64(&hw_stats->rx_mcast_packets) +
7544                 get_stat64(&hw_stats->rx_bcast_packets);
7545
7546         stats->tx_packets = old_stats->tx_packets +
7547                 get_stat64(&hw_stats->tx_ucast_packets) +
7548                 get_stat64(&hw_stats->tx_mcast_packets) +
7549                 get_stat64(&hw_stats->tx_bcast_packets);
7550
7551         stats->rx_bytes = old_stats->rx_bytes +
7552                 get_stat64(&hw_stats->rx_octets);
7553         stats->tx_bytes = old_stats->tx_bytes +
7554                 get_stat64(&hw_stats->tx_octets);
7555
7556         stats->rx_errors = old_stats->rx_errors +
7557                 get_stat64(&hw_stats->rx_errors);
7558         stats->tx_errors = old_stats->tx_errors +
7559                 get_stat64(&hw_stats->tx_errors) +
7560                 get_stat64(&hw_stats->tx_mac_errors) +
7561                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7562                 get_stat64(&hw_stats->tx_discards);
7563
7564         stats->multicast = old_stats->multicast +
7565                 get_stat64(&hw_stats->rx_mcast_packets);
7566         stats->collisions = old_stats->collisions +
7567                 get_stat64(&hw_stats->tx_collisions);
7568
7569         stats->rx_length_errors = old_stats->rx_length_errors +
7570                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7571                 get_stat64(&hw_stats->rx_undersize_packets);
7572
7573         stats->rx_over_errors = old_stats->rx_over_errors +
7574                 get_stat64(&hw_stats->rxbds_empty);
7575         stats->rx_frame_errors = old_stats->rx_frame_errors +
7576                 get_stat64(&hw_stats->rx_align_errors);
7577         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7578                 get_stat64(&hw_stats->tx_discards);
7579         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7580                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7581
7582         stats->rx_crc_errors = old_stats->rx_crc_errors +
7583                 calc_crc_errors(tp);
7584
7585         stats->rx_missed_errors = old_stats->rx_missed_errors +
7586                 get_stat64(&hw_stats->rx_discards);
7587
7588         return stats;
7589 }
7590
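     /* Bitwise CRC-32 using the reflected Ethernet polynomial
      * (0xedb88320); __tg3_set_rx_mode() below hashes multicast
      * addresses with it to build the MAC hash filter.
      */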
7591 static inline u32 calc_crc(unsigned char *buf, int len)
7592 {
7593         u32 reg;
7594         u32 tmp;
7595         int j, k;
7596
7597         reg = 0xffffffff;
7598
7599         for (j = 0; j < len; j++) {
7600                 reg ^= buf[j];
7601
7602                 for (k = 0; k < 8; k++) {
7603                         tmp = reg & 0x01;
7604
7605                         reg >>= 1;
7606
7607                         if (tmp) {
7608                                 reg ^= 0xedb88320;
7609                         }
7610                 }
7611         }
7612
7613         return ~reg;
7614 }
7615
7616 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7617 {
7618         /* accept or reject all multicast frames */
7619         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7620         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7621         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7622         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7623 }
7624
7625 static void __tg3_set_rx_mode(struct net_device *dev)
7626 {
7627         struct tg3 *tp = netdev_priv(dev);
7628         u32 rx_mode;
7629
7630         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7631                                   RX_MODE_KEEP_VLAN_TAG);
7632
7633         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7634          * flag clear.
7635          */
7636 #if TG3_VLAN_TAG_USED
7637         if (!tp->vlgrp &&
7638             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7639                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7640 #else
7641         /* By definition, VLAN is always disabled in this
7642          * case.
7643          */
7644         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7645                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7646 #endif
7647
7648         if (dev->flags & IFF_PROMISC) {
7649                 /* Promiscuous mode. */
7650                 rx_mode |= RX_MODE_PROMISC;
7651         } else if (dev->flags & IFF_ALLMULTI) {
7652                 /* Accept all multicast. */
7653                 tg3_set_multi(tp, 1);
7654         } else if (dev->mc_count < 1) {
7655                 /* Reject all multicast. */
7656                 tg3_set_multi(tp, 0);
7657         } else {
7658                 /* Accept one or more multicast(s). */
7659                 struct dev_mc_list *mclist;
7660                 unsigned int i;
7661                 u32 mc_filter[4] = { 0, };
7662                 u32 regidx;
7663                 u32 bit;
7664                 u32 crc;
7665
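                     /* Hash each address into the 128-bit filter: the low
                      * 7 bits of the complemented CRC select a filter bit,
                      * bits 6:5 choosing one of the four MAC_HASH_REG
                      * registers and bits 4:0 the bit within it.  E.g. a
                      * CRC of 0xffffff80 complements to 0x7f and maps to
                      * bit 31 of MAC_HASH_REG_3.
                      */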
7666                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7667                      i++, mclist = mclist->next) {
7668
7669                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7670                         bit = ~crc & 0x7f;
7671                         regidx = (bit & 0x60) >> 5;
7672                         bit &= 0x1f;
7673                         mc_filter[regidx] |= (1 << bit);
7674                 }
7675
7676                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7677                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7678                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7679                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7680         }
7681
7682         if (rx_mode != tp->rx_mode) {
7683                 tp->rx_mode = rx_mode;
7684                 tw32_f(MAC_RX_MODE, rx_mode);
7685                 udelay(10);
7686         }
7687 }
7688
7689 static void tg3_set_rx_mode(struct net_device *dev)
7690 {
7691         struct tg3 *tp = netdev_priv(dev);
7692
7693         if (!netif_running(dev))
7694                 return;
7695
7696         tg3_full_lock(tp, 0);
7697         __tg3_set_rx_mode(dev);
7698         tg3_full_unlock(tp);
7699 }
7700
7701 #define TG3_REGDUMP_LEN         (32 * 1024)
7702
7703 static int tg3_get_regs_len(struct net_device *dev)
7704 {
7705         return TG3_REGDUMP_LEN;
7706 }
7707
7708 static void tg3_get_regs(struct net_device *dev,
7709                 struct ethtool_regs *regs, void *_p)
7710 {
7711         u32 *p = _p;
7712         struct tg3 *tp = netdev_priv(dev);
7713         u8 *orig_p = _p;
7714         int i;
7715
7716         regs->version = 0;
7717
7718         memset(p, 0, TG3_REGDUMP_LEN);
7719
7720         if (tp->link_config.phy_is_low_power)
7721                 return;
7722
7723         tg3_full_lock(tp, 0);
7724
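     /* Helpers for filling the 32 KB dump buffer: GET_REG32_LOOP copies
      * 'len' bytes of consecutive registers starting at 'base' into the
      * buffer at that same offset, GET_REG32_1 copies a single register.
      * The dump therefore mirrors the device register map; regions that
      * are never dumped stay zero from the memset above.
      */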
7725 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7726 #define GET_REG32_LOOP(base,len)                \
7727 do {    p = (u32 *)(orig_p + (base));           \
7728         for (i = 0; i < len; i += 4)            \
7729                 __GET_REG32((base) + i);        \
7730 } while (0)
7731 #define GET_REG32_1(reg)                        \
7732 do {    p = (u32 *)(orig_p + (reg));            \
7733         __GET_REG32((reg));                     \
7734 } while (0)
7735
7736         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7737         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7738         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7739         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7740         GET_REG32_1(SNDDATAC_MODE);
7741         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7742         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7743         GET_REG32_1(SNDBDC_MODE);
7744         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7745         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7746         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7747         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7748         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7749         GET_REG32_1(RCVDCC_MODE);
7750         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7751         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7752         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7753         GET_REG32_1(MBFREE_MODE);
7754         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7755         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7756         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7757         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7758         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7759         GET_REG32_1(RX_CPU_MODE);
7760         GET_REG32_1(RX_CPU_STATE);
7761         GET_REG32_1(RX_CPU_PGMCTR);
7762         GET_REG32_1(RX_CPU_HWBKPT);
7763         GET_REG32_1(TX_CPU_MODE);
7764         GET_REG32_1(TX_CPU_STATE);
7765         GET_REG32_1(TX_CPU_PGMCTR);
7766         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7767         GET_REG32_LOOP(FTQ_RESET, 0x120);
7768         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7769         GET_REG32_1(DMAC_MODE);
7770         GET_REG32_LOOP(GRC_MODE, 0x4c);
7771         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7772                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7773
7774 #undef __GET_REG32
7775 #undef GET_REG32_LOOP
7776 #undef GET_REG32_1
7777
7778         tg3_full_unlock(tp);
7779 }
7780
7781 static int tg3_get_eeprom_len(struct net_device *dev)
7782 {
7783         struct tg3 *tp = netdev_priv(dev);
7784
7785         return tp->nvram_size;
7786 }
7787
7788 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7789 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7790
7791 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7792 {
7793         struct tg3 *tp = netdev_priv(dev);
7794         int ret;
7795         u8  *pd;
7796         u32 i, offset, len, val, b_offset, b_count;
7797
7798         if (tp->link_config.phy_is_low_power)
7799                 return -EAGAIN;
7800
7801         offset = eeprom->offset;
7802         len = eeprom->len;
7803         eeprom->len = 0;
7804
7805         eeprom->magic = TG3_EEPROM_MAGIC;
7806
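             /* NVRAM is read one aligned 32-bit word at a time, so split
              * the request into an unaligned head, a run of whole words
              * and an unaligned tail; the partial words are read in full
              * and only the requested bytes are copied out.
              */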
7807         if (offset & 3) {
7808                 /* adjustments to start on required 4 byte boundary */
7809                 b_offset = offset & 3;
7810                 b_count = 4 - b_offset;
7811                 if (b_count > len) {
7812                         /* i.e. offset=1 len=2 */
7813                         b_count = len;
7814                 }
7815                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7816                 if (ret)
7817                         return ret;
7818                 val = cpu_to_le32(val);
7819                 memcpy(data, ((char*)&val) + b_offset, b_count);
7820                 len -= b_count;
7821                 offset += b_count;
7822                 eeprom->len += b_count;
7823         }
7824
7825         /* read bytes up to the last 4 byte boundary */
7826         pd = &data[eeprom->len];
7827         for (i = 0; i < (len - (len & 3)); i += 4) {
7828                 ret = tg3_nvram_read(tp, offset + i, &val);
7829                 if (ret) {
7830                         eeprom->len += i;
7831                         return ret;
7832                 }
7833                 val = cpu_to_le32(val);
7834                 memcpy(pd + i, &val, 4);
7835         }
7836         eeprom->len += i;
7837
7838         if (len & 3) {
7839                 /* read last bytes not ending on 4 byte boundary */
7840                 pd = &data[eeprom->len];
7841                 b_count = len & 3;
7842                 b_offset = offset + len - b_count;
7843                 ret = tg3_nvram_read(tp, b_offset, &val);
7844                 if (ret)
7845                         return ret;
7846                 val = cpu_to_le32(val);
7847                 memcpy(pd, ((char*)&val), b_count);
7848                 eeprom->len += b_count;
7849         }
7850         return 0;
7851 }
7852
7853 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7854
7855 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7856 {
7857         struct tg3 *tp = netdev_priv(dev);
7858         int ret;
7859         u32 offset, len, b_offset, odd_len, start, end;
7860         u8 *buf;
7861
7862         if (tp->link_config.phy_is_low_power)
7863                 return -EAGAIN;
7864
7865         if (eeprom->magic != TG3_EEPROM_MAGIC)
7866                 return -EINVAL;
7867
7868         offset = eeprom->offset;
7869         len = eeprom->len;
7870
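             /* Writes must also be word aligned: read back the words that
              * straddle the start and end of the request so the untouched
              * bytes can be merged into an aligned, padded staging buffer
              * before programming the block.
              */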
7871         if ((b_offset = (offset & 3))) {
7872                 /* adjustments to start on required 4 byte boundary */
7873                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7874                 if (ret)
7875                         return ret;
7876                 start = cpu_to_le32(start);
7877                 len += b_offset;
7878                 offset &= ~3;
7879                 if (len < 4)
7880                         len = 4;
7881         }
7882
7883         odd_len = 0;
7884         if (len & 3) {
7885                 /* adjustments to end on required 4 byte boundary */
7886                 odd_len = 1;
7887                 len = (len + 3) & ~3;
7888                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7889                 if (ret)
7890                         return ret;
7891                 end = cpu_to_le32(end);
7892         }
7893
7894         buf = data;
7895         if (b_offset || odd_len) {
7896                 buf = kmalloc(len, GFP_KERNEL);
7897                 if (!buf)
7898                         return -ENOMEM;
7899                 if (b_offset)
7900                         memcpy(buf, &start, 4);
7901                 if (odd_len)
7902                         memcpy(buf+len-4, &end, 4);
7903                 memcpy(buf + b_offset, data, eeprom->len);
7904         }
7905
7906         ret = tg3_nvram_write_block(tp, offset, len, buf);
7907
7908         if (buf != data)
7909                 kfree(buf);
7910
7911         return ret;
7912 }
7913
7914 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7915 {
7916         struct tg3 *tp = netdev_priv(dev);
7917
7918         cmd->supported = (SUPPORTED_Autoneg);
7919
7920         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7921                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7922                                    SUPPORTED_1000baseT_Full);
7923
7924         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7925                 cmd->supported |= (SUPPORTED_100baseT_Half |
7926                                   SUPPORTED_100baseT_Full |
7927                                   SUPPORTED_10baseT_Half |
7928                                   SUPPORTED_10baseT_Full |
7929                                   SUPPORTED_MII);
7930                 cmd->port = PORT_TP;
7931         } else {
7932                 cmd->supported |= SUPPORTED_FIBRE;
7933                 cmd->port = PORT_FIBRE;
7934         }
7935
7936         cmd->advertising = tp->link_config.advertising;
7937         if (netif_running(dev)) {
7938                 cmd->speed = tp->link_config.active_speed;
7939                 cmd->duplex = tp->link_config.active_duplex;
7940         }
7941         cmd->phy_address = PHY_ADDR;
7942         cmd->transceiver = XCVR_INTERNAL;
7943         cmd->autoneg = tp->link_config.autoneg;
7944         cmd->maxtxpkt = 0;
7945         cmd->maxrxpkt = 0;
7946         return 0;
7947 }
7948
7949 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7950 {
7951         struct tg3 *tp = netdev_priv(dev);
7952
7953         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7954                 /* These are the only valid advertisement bits allowed.  */
7955                 if (cmd->autoneg == AUTONEG_ENABLE &&
7956                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7957                                           ADVERTISED_1000baseT_Full |
7958                                           ADVERTISED_Autoneg |
7959                                           ADVERTISED_FIBRE)))
7960                         return -EINVAL;
7961                 /* Fiber can only do SPEED_1000.  */
7962                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7963                          (cmd->speed != SPEED_1000))
7964                         return -EINVAL;
7965         /* Copper cannot force SPEED_1000.  */
7966         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7967                    (cmd->speed == SPEED_1000))
7968                 return -EINVAL;
7969         else if ((cmd->speed == SPEED_1000) &&
7970                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7971                 return -EINVAL;
7972
7973         tg3_full_lock(tp, 0);
7974
7975         tp->link_config.autoneg = cmd->autoneg;
7976         if (cmd->autoneg == AUTONEG_ENABLE) {
7977                 tp->link_config.advertising = cmd->advertising;
7978                 tp->link_config.speed = SPEED_INVALID;
7979                 tp->link_config.duplex = DUPLEX_INVALID;
7980         } else {
7981                 tp->link_config.advertising = 0;
7982                 tp->link_config.speed = cmd->speed;
7983                 tp->link_config.duplex = cmd->duplex;
7984         }
7985
7986         tp->link_config.orig_speed = tp->link_config.speed;
7987         tp->link_config.orig_duplex = tp->link_config.duplex;
7988         tp->link_config.orig_autoneg = tp->link_config.autoneg;
7989
7990         if (netif_running(dev))
7991                 tg3_setup_phy(tp, 1);
7992
7993         tg3_full_unlock(tp);
7994
7995         return 0;
7996 }
7997
7998 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7999 {
8000         struct tg3 *tp = netdev_priv(dev);
8001
8002         strcpy(info->driver, DRV_MODULE_NAME);
8003         strcpy(info->version, DRV_MODULE_VERSION);
8004         strcpy(info->fw_version, tp->fw_ver);
8005         strcpy(info->bus_info, pci_name(tp->pdev));
8006 }
8007
8008 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8009 {
8010         struct tg3 *tp = netdev_priv(dev);
8011
8012         wol->supported = WAKE_MAGIC;
8013         wol->wolopts = 0;
8014         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8015                 wol->wolopts = WAKE_MAGIC;
8016         memset(&wol->sopass, 0, sizeof(wol->sopass));
8017 }
8018
8019 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8020 {
8021         struct tg3 *tp = netdev_priv(dev);
8022
8023         if (wol->wolopts & ~WAKE_MAGIC)
8024                 return -EINVAL;
8025         if ((wol->wolopts & WAKE_MAGIC) &&
8026             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
8027             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
8028                 return -EINVAL;
8029
8030         spin_lock_bh(&tp->lock);
8031         if (wol->wolopts & WAKE_MAGIC)
8032                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8033         else
8034                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8035         spin_unlock_bh(&tp->lock);
8036
8037         return 0;
8038 }
8039
8040 static u32 tg3_get_msglevel(struct net_device *dev)
8041 {
8042         struct tg3 *tp = netdev_priv(dev);
8043         return tp->msg_enable;
8044 }
8045
8046 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8047 {
8048         struct tg3 *tp = netdev_priv(dev);
8049         tp->msg_enable = value;
8050 }
8051
8052 static int tg3_set_tso(struct net_device *dev, u32 value)
8053 {
8054         struct tg3 *tp = netdev_priv(dev);
8055
8056         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8057                 if (value)
8058                         return -EINVAL;
8059                 return 0;
8060         }
8061         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8062             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8063                 if (value)
8064                         dev->features |= NETIF_F_TSO6;
8065                 else
8066                         dev->features &= ~NETIF_F_TSO6;
8067         }
8068         return ethtool_op_set_tso(dev, value);
8069 }
8070
8071 static int tg3_nway_reset(struct net_device *dev)
8072 {
8073         struct tg3 *tp = netdev_priv(dev);
8074         u32 bmcr;
8075         int r;
8076
8077         if (!netif_running(dev))
8078                 return -EAGAIN;
8079
8080         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8081                 return -EINVAL;
8082
8083         spin_lock_bh(&tp->lock);
8084         r = -EINVAL;
8085         tg3_readphy(tp, MII_BMCR, &bmcr);
8086         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8087             ((bmcr & BMCR_ANENABLE) ||
8088              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8089                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8090                                            BMCR_ANENABLE);
8091                 r = 0;
8092         }
8093         spin_unlock_bh(&tp->lock);
8094
8095         return r;
8096 }
8097
8098 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8099 {
8100         struct tg3 *tp = netdev_priv(dev);
8101
8102         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8103         ering->rx_mini_max_pending = 0;
8104         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8105                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8106         else
8107                 ering->rx_jumbo_max_pending = 0;
8108
8109         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8110
8111         ering->rx_pending = tp->rx_pending;
8112         ering->rx_mini_pending = 0;
8113         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8114                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8115         else
8116                 ering->rx_jumbo_pending = 0;
8117
8118         ering->tx_pending = tp->tx_pending;
8119 }
8120
8121 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8122 {
8123         struct tg3 *tp = netdev_priv(dev);
8124         int irq_sync = 0, err = 0;
8125
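             /* Sanity-check against the fixed hardware ring sizes.  The TX
              * ring must hold more than MAX_SKB_FRAGS descriptors (typically
              * 18 with 4 KB pages) since one maximally fragmented skb can
              * consume that many plus one; chips with the HW TSO bug are
              * required to leave roughly three times that headroom,
              * presumably for the TSO workaround path.
              */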
8126         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8127             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8128             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8129             (ering->tx_pending <= MAX_SKB_FRAGS) ||
8130             ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG) &&
8131              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8132                 return -EINVAL;
8133
8134         if (netif_running(dev)) {
8135                 tg3_netif_stop(tp);
8136                 irq_sync = 1;
8137         }
8138
8139         tg3_full_lock(tp, irq_sync);
8140
8141         tp->rx_pending = ering->rx_pending;
8142
8143         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8144             tp->rx_pending > 63)
8145                 tp->rx_pending = 63;
8146         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8147         tp->tx_pending = ering->tx_pending;
8148
8149         if (netif_running(dev)) {
8150                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8151                 err = tg3_restart_hw(tp, 1);
8152                 if (!err)
8153                         tg3_netif_start(tp);
8154         }
8155
8156         tg3_full_unlock(tp);
8157
8158         return err;
8159 }
8160
8161 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8162 {
8163         struct tg3 *tp = netdev_priv(dev);
8164
8165         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8166         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8167         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8168 }
8169
8170 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8171 {
8172         struct tg3 *tp = netdev_priv(dev);
8173         int irq_sync = 0, err = 0;
8174
8175         if (netif_running(dev)) {
8176                 tg3_netif_stop(tp);
8177                 irq_sync = 1;
8178         }
8179
8180         tg3_full_lock(tp, irq_sync);
8181
8182         if (epause->autoneg)
8183                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8184         else
8185                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8186         if (epause->rx_pause)
8187                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8188         else
8189                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8190         if (epause->tx_pause)
8191                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8192         else
8193                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8194
8195         if (netif_running(dev)) {
8196                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8197                 err = tg3_restart_hw(tp, 1);
8198                 if (!err)
8199                         tg3_netif_start(tp);
8200         }
8201
8202         tg3_full_unlock(tp);
8203
8204         return err;
8205 }
8206
8207 static u32 tg3_get_rx_csum(struct net_device *dev)
8208 {
8209         struct tg3 *tp = netdev_priv(dev);
8210         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8211 }
8212
8213 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8214 {
8215         struct tg3 *tp = netdev_priv(dev);
8216
8217         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8218                 if (data != 0)
8219                         return -EINVAL;
8220                 return 0;
8221         }
8222
8223         spin_lock_bh(&tp->lock);
8224         if (data)
8225                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8226         else
8227                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8228         spin_unlock_bh(&tp->lock);
8229
8230         return 0;
8231 }
8232
8233 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8234 {
8235         struct tg3 *tp = netdev_priv(dev);
8236
8237         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8238                 if (data != 0)
8239                         return -EINVAL;
8240                 return 0;
8241         }
8242
8243         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8244             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8245                 ethtool_op_set_tx_hw_csum(dev, data);
8246         else
8247                 ethtool_op_set_tx_csum(dev, data);
8248
8249         return 0;
8250 }
8251
8252 static int tg3_get_stats_count(struct net_device *dev)
8253 {
8254         return TG3_NUM_STATS;
8255 }
8256
8257 static int tg3_get_test_count(struct net_device *dev)
8258 {
8259         return TG3_NUM_TEST;
8260 }
8261
8262 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8263 {
8264         switch (stringset) {
8265         case ETH_SS_STATS:
8266                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8267                 break;
8268         case ETH_SS_TEST:
8269                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8270                 break;
8271         default:
8272                 WARN_ON(1);     /* we need a WARN() */
8273                 break;
8274         }
8275 }
8276
8277 static int tg3_phys_id(struct net_device *dev, u32 data)
8278 {
8279         struct tg3 *tp = netdev_priv(dev);
8280         int i;
8281
8282         if (!netif_running(tp->dev))
8283                 return -EAGAIN;
8284
8285         if (data == 0)
8286                 data = 2;
8287
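             /* Blink for 'data' seconds: force all link/traffic LEDs on and
              * off in alternating 500 ms steps, then restore the original
              * LED control value.
              */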
8288         for (i = 0; i < (data * 2); i++) {
8289                 if ((i % 2) == 0)
8290                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8291                                            LED_CTRL_1000MBPS_ON |
8292                                            LED_CTRL_100MBPS_ON |
8293                                            LED_CTRL_10MBPS_ON |
8294                                            LED_CTRL_TRAFFIC_OVERRIDE |
8295                                            LED_CTRL_TRAFFIC_BLINK |
8296                                            LED_CTRL_TRAFFIC_LED);
8297
8298                 else
8299                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8300                                            LED_CTRL_TRAFFIC_OVERRIDE);
8301
8302                 if (msleep_interruptible(500))
8303                         break;
8304         }
8305         tw32(MAC_LED_CTRL, tp->led_ctrl);
8306         return 0;
8307 }
8308
8309 static void tg3_get_ethtool_stats(struct net_device *dev,
8310                                    struct ethtool_stats *estats, u64 *tmp_stats)
8311 {
8312         struct tg3 *tp = netdev_priv(dev);
8313         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8314 }
8315
8316 #define NVRAM_TEST_SIZE 0x100
8317 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8318 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8319 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8320
8321 static int tg3_test_nvram(struct tg3 *tp)
8322 {
8323         u32 *buf, csum, magic;
8324         int i, j, err = 0, size;
8325
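             /* The first NVRAM word identifies the image format: the
              * standard EEPROM image (verified via the bootstrap and
              * manufacturing-block checksums below), a firmware self-boot
              * image covered by a simple byte checksum, or the hardware
              * self-boot format protected by per-byte parity bits.
              */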
8326         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8327                 return -EIO;
8328
8329         if (magic == TG3_EEPROM_MAGIC)
8330                 size = NVRAM_TEST_SIZE;
8331         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
8332                 if ((magic & 0xe00000) == 0x200000)
8333                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8334                 else
8335                         return 0;
8336         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
8337                 size = NVRAM_SELFBOOT_HW_SIZE;
8338         else
8339                 return -EIO;
8340
8341         buf = kmalloc(size, GFP_KERNEL);
8342         if (buf == NULL)
8343                 return -ENOMEM;
8344
8345         err = -EIO;
8346         for (i = 0, j = 0; i < size; i += 4, j++) {
8347                 u32 val;
8348
8349                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8350                         break;
8351                 buf[j] = cpu_to_le32(val);
8352         }
8353         if (i < size)
8354                 goto out;
8355
8356         /* Selfboot format */
8357         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
8358             TG3_EEPROM_MAGIC_FW) {
8359                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8360
8361                 for (i = 0; i < size; i++)
8362                         csum8 += buf8[i];
8363
8364                 if (csum8 == 0) {
8365                         err = 0;
8366                         goto out;
8367                 }
8368
8369                 err = -EIO;
8370                 goto out;
8371         }
8372
8373         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
8374             TG3_EEPROM_MAGIC_HW) {
8375                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
8376                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
8377                 u8 *buf8 = (u8 *) buf;
8378                 int j, k;
8379
8380                 /* Separate the parity bits and the data bytes.  */
8381                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
8382                         if ((i == 0) || (i == 8)) {
8383                                 int l;
8384                                 u8 msk;
8385
8386                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
8387                                         parity[k++] = buf8[i] & msk;
8388                                 i++;
8389                         }
8390                         else if (i == 16) {
8391                                 int l;
8392                                 u8 msk;
8393
8394                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
8395                                         parity[k++] = buf8[i] & msk;
8396                                 i++;
8397
8398                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
8399                                         parity[k++] = buf8[i] & msk;
8400                                 i++;
8401                         }
8402                         data[j++] = buf8[i];
8403                 }
8404
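                     /* Each data byte together with its stored parity bit
                      * must have odd parity: odd-weight bytes need the
                      * parity bit clear, even-weight bytes need it set.
                      */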
8405                 err = -EIO;
8406                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
8407                         u8 hw8 = hweight8(data[i]);
8408
8409                         if ((hw8 & 0x1) && parity[i])
8410                                 goto out;
8411                         else if (!(hw8 & 0x1) && !parity[i])
8412                                 goto out;
8413                 }
8414                 err = 0;
8415                 goto out;
8416         }
8417
8418         /* Bootstrap checksum at offset 0x10 */
8419         csum = calc_crc((unsigned char *) buf, 0x10);
8420         if (csum != cpu_to_le32(buf[0x10/4]))
8421                 goto out;
8422
8423         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8424         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8425         if (csum != cpu_to_le32(buf[0xfc/4]))
8426                 goto out;
8427
8428         err = 0;
8429
8430 out:
8431         kfree(buf);
8432         return err;
8433 }
8434
8435 #define TG3_SERDES_TIMEOUT_SEC  2
8436 #define TG3_COPPER_TIMEOUT_SEC  6
8437
8438 static int tg3_test_link(struct tg3 *tp)
8439 {
8440         int i, max;
8441
8442         if (!netif_running(tp->dev))
8443                 return -ENODEV;
8444
8445         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8446                 max = TG3_SERDES_TIMEOUT_SEC;
8447         else
8448                 max = TG3_COPPER_TIMEOUT_SEC;
8449
8450         for (i = 0; i < max; i++) {
8451                 if (netif_carrier_ok(tp->dev))
8452                         return 0;
8453
8454                 if (msleep_interruptible(1000))
8455                         break;
8456         }
8457
8458         return -EIO;
8459 }
8460
8461 /* Only test the commonly used registers */
8462 static int tg3_test_registers(struct tg3 *tp)
8463 {
8464         int i, is_5705, is_5750;
8465         u32 offset, read_mask, write_mask, val, save_val, read_val;
8466         static struct {
8467                 u16 offset;
8468                 u16 flags;
8469 #define TG3_FL_5705     0x1
8470 #define TG3_FL_NOT_5705 0x2
8471 #define TG3_FL_NOT_5788 0x4
8472 #define TG3_FL_NOT_5750 0x8
8473                 u32 read_mask;
8474                 u32 write_mask;
8475         } reg_tbl[] = {
8476                 /* MAC Control Registers */
8477                 { MAC_MODE, TG3_FL_NOT_5705,
8478                         0x00000000, 0x00ef6f8c },
8479                 { MAC_MODE, TG3_FL_5705,
8480                         0x00000000, 0x01ef6b8c },
8481                 { MAC_STATUS, TG3_FL_NOT_5705,
8482                         0x03800107, 0x00000000 },
8483                 { MAC_STATUS, TG3_FL_5705,
8484                         0x03800100, 0x00000000 },
8485                 { MAC_ADDR_0_HIGH, 0x0000,
8486                         0x00000000, 0x0000ffff },
8487                 { MAC_ADDR_0_LOW, 0x0000,
8488                         0x00000000, 0xffffffff },
8489                 { MAC_RX_MTU_SIZE, 0x0000,
8490                         0x00000000, 0x0000ffff },
8491                 { MAC_TX_MODE, 0x0000,
8492                         0x00000000, 0x00000070 },
8493                 { MAC_TX_LENGTHS, 0x0000,
8494                         0x00000000, 0x00003fff },
8495                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8496                         0x00000000, 0x000007fc },
8497                 { MAC_RX_MODE, TG3_FL_5705,
8498                         0x00000000, 0x000007dc },
8499                 { MAC_HASH_REG_0, 0x0000,
8500                         0x00000000, 0xffffffff },
8501                 { MAC_HASH_REG_1, 0x0000,
8502                         0x00000000, 0xffffffff },
8503                 { MAC_HASH_REG_2, 0x0000,
8504                         0x00000000, 0xffffffff },
8505                 { MAC_HASH_REG_3, 0x0000,
8506                         0x00000000, 0xffffffff },
8507
8508                 /* Receive Data and Receive BD Initiator Control Registers. */
8509                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8510                         0x00000000, 0xffffffff },
8511                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8512                         0x00000000, 0xffffffff },
8513                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8514                         0x00000000, 0x00000003 },
8515                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8516                         0x00000000, 0xffffffff },
8517                 { RCVDBDI_STD_BD+0, 0x0000,
8518                         0x00000000, 0xffffffff },
8519                 { RCVDBDI_STD_BD+4, 0x0000,
8520                         0x00000000, 0xffffffff },
8521                 { RCVDBDI_STD_BD+8, 0x0000,
8522                         0x00000000, 0xffff0002 },
8523                 { RCVDBDI_STD_BD+0xc, 0x0000,
8524                         0x00000000, 0xffffffff },
8525
8526                 /* Receive BD Initiator Control Registers. */
8527                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8528                         0x00000000, 0xffffffff },
8529                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8530                         0x00000000, 0x000003ff },
8531                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8532                         0x00000000, 0xffffffff },
8533
8534                 /* Host Coalescing Control Registers. */
8535                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8536                         0x00000000, 0x00000004 },
8537                 { HOSTCC_MODE, TG3_FL_5705,
8538                         0x00000000, 0x000000f6 },
8539                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8540                         0x00000000, 0xffffffff },
8541                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8542                         0x00000000, 0x000003ff },
8543                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8544                         0x00000000, 0xffffffff },
8545                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8546                         0x00000000, 0x000003ff },
8547                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8548                         0x00000000, 0xffffffff },
8549                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8550                         0x00000000, 0x000000ff },
8551                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8552                         0x00000000, 0xffffffff },
8553                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8554                         0x00000000, 0x000000ff },
8555                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8556                         0x00000000, 0xffffffff },
8557                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8558                         0x00000000, 0xffffffff },
8559                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8560                         0x00000000, 0xffffffff },
8561                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8562                         0x00000000, 0x000000ff },
8563                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8564                         0x00000000, 0xffffffff },
8565                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8566                         0x00000000, 0x000000ff },
8567                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8568                         0x00000000, 0xffffffff },
8569                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8570                         0x00000000, 0xffffffff },
8571                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8572                         0x00000000, 0xffffffff },
8573                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8574                         0x00000000, 0xffffffff },
8575                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8576                         0x00000000, 0xffffffff },
8577                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8578                         0xffffffff, 0x00000000 },
8579                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8580                         0xffffffff, 0x00000000 },
8581
8582                 /* Buffer Manager Control Registers. */
8583                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
8584                         0x00000000, 0x007fff80 },
8585                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
8586                         0x00000000, 0x007fffff },
8587                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8588                         0x00000000, 0x0000003f },
8589                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8590                         0x00000000, 0x000001ff },
8591                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8592                         0x00000000, 0x000001ff },
8593                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8594                         0xffffffff, 0x00000000 },
8595                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8596                         0xffffffff, 0x00000000 },
8597
8598                 /* Mailbox Registers */
8599                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8600                         0x00000000, 0x000001ff },
8601                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8602                         0x00000000, 0x000001ff },
8603                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8604                         0x00000000, 0x000007ff },
8605                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8606                         0x00000000, 0x000001ff },
8607
8608                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8609         };
8610
8611         is_5705 = is_5750 = 0;
8612         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8613                 is_5705 = 1;
8614                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8615                         is_5750 = 1;
8616         }
8617
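             /* For every table entry that applies to this chip: save the
              * register, write zero and then (read_mask | write_mask),
              * checking that the read-only bits keep their saved value and
              * the writable bits follow what was written, and finally
              * restore the original contents.
              */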
8618         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8619                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8620                         continue;
8621
8622                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8623                         continue;
8624
8625                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8626                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8627                         continue;
8628
8629                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8630                         continue;
8631
8632                 offset = (u32) reg_tbl[i].offset;
8633                 read_mask = reg_tbl[i].read_mask;
8634                 write_mask = reg_tbl[i].write_mask;
8635
8636                 /* Save the original register content */
8637                 save_val = tr32(offset);
8638
8639                 /* Determine the read-only value. */
8640                 read_val = save_val & read_mask;
8641
8642                 /* Write zero to the register, then make sure the read-only bits
8643                  * are not changed and the read/write bits are all zeros.
8644                  */
8645                 tw32(offset, 0);
8646
8647                 val = tr32(offset);
8648
8649                 /* Test the read-only and read/write bits. */
8650                 if (((val & read_mask) != read_val) || (val & write_mask))
8651                         goto out;
8652
8653                 /* Write ones to all the read_mask and write_mask bits, then
8654                  * make sure the read-only bits are not changed and the
8655                  * read/write bits are all ones.
8656                  */
8657                 tw32(offset, read_mask | write_mask);
8658
8659                 val = tr32(offset);
8660
8661                 /* Test the read-only bits. */
8662                 if ((val & read_mask) != read_val)
8663                         goto out;
8664
8665                 /* Test the read/write bits. */
8666                 if ((val & write_mask) != write_mask)
8667                         goto out;
8668
8669                 tw32(offset, save_val);
8670         }
8671
8672         return 0;
8673
8674 out:
8675         if (netif_msg_hw(tp))
8676                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
8677                        offset);
8678         tw32(offset, save_val);
8679         return -EIO;
8680 }
8681
8682 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8683 {
8684         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8685         int i;
8686         u32 j;
8687
8688         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8689                 for (j = 0; j < len; j += 4) {
8690                         u32 val;
8691
8692                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8693                         tg3_read_mem(tp, offset + j, &val);
8694                         if (val != test_pattern[i])
8695                                 return -EIO;
8696                 }
8697         }
8698         return 0;
8699 }
8700
8701 static int tg3_test_memory(struct tg3 *tp)
8702 {
8703         static struct mem_entry {
8704                 u32 offset;
8705                 u32 len;
8706         } mem_tbl_570x[] = {
8707                 { 0x00000000, 0x00b50},
8708                 { 0x00002000, 0x1c000},
8709                 { 0xffffffff, 0x00000}
8710         }, mem_tbl_5705[] = {
8711                 { 0x00000100, 0x0000c},
8712                 { 0x00000200, 0x00008},
8713                 { 0x00004000, 0x00800},
8714                 { 0x00006000, 0x01000},
8715                 { 0x00008000, 0x02000},
8716                 { 0x00010000, 0x0e000},
8717                 { 0xffffffff, 0x00000}
8718         }, mem_tbl_5755[] = {
8719                 { 0x00000200, 0x00008},
8720                 { 0x00004000, 0x00800},
8721                 { 0x00006000, 0x00800},
8722                 { 0x00008000, 0x02000},
8723                 { 0x00010000, 0x0c000},
8724                 { 0xffffffff, 0x00000}
8725         }, mem_tbl_5906[] = {
8726                 { 0x00000200, 0x00008},
8727                 { 0x00004000, 0x00400},
8728                 { 0x00006000, 0x00400},
8729                 { 0x00008000, 0x01000},
8730                 { 0x00010000, 0x01000},
8731                 { 0xffffffff, 0x00000}
8732         };
8733         struct mem_entry *mem_tbl;
8734         int err = 0;
8735         int i;
8736
8737         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8738                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8739                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8740                         mem_tbl = mem_tbl_5755;
8741                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8742                         mem_tbl = mem_tbl_5906;
8743                 else
8744                         mem_tbl = mem_tbl_5705;
8745         } else
8746                 mem_tbl = mem_tbl_570x;
8747
8748         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8749                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8750                     mem_tbl[i].len)) != 0)
8751                         break;
8752         }
8753
8754         return err;
8755 }
8756
8757 #define TG3_MAC_LOOPBACK        0
8758 #define TG3_PHY_LOOPBACK        1
8759
8760 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8761 {
8762         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8763         u32 desc_idx;
8764         struct sk_buff *skb, *rx_skb;
8765         u8 *tx_data;
8766         dma_addr_t map;
8767         int num_pkts, tx_len, rx_len, i, err;
8768         struct tg3_rx_buffer_desc *desc;
8769
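             /* Loopback test: put the MAC or the PHY into loopback,
              * transmit a single frame addressed to this NIC, poll the
              * status block until the TX completion and RX producer
              * indices advance, then verify the received payload byte
              * for byte against what was sent.
              */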
8770         if (loopback_mode == TG3_MAC_LOOPBACK) {
8771                 /* HW errata - mac loopback fails in some cases on 5780.
8772                  * Normal traffic and PHY loopback are not affected by
8773                  * errata.
8774                  */
8775                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8776                         return 0;
8777
8778                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8779                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
8780                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8781                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8782                 else
8783                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8784                 tw32(MAC_MODE, mac_mode);
8785         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8786                 u32 val;
8787
8788                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8789                         u32 phytest;
8790
8791                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
8792                                 u32 phy;
8793
8794                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
8795                                              phytest | MII_TG3_EPHY_SHADOW_EN);
8796                                 if (!tg3_readphy(tp, 0x1b, &phy))
8797                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
8798                                 if (!tg3_readphy(tp, 0x10, &phy))
8799                                         tg3_writephy(tp, 0x10, phy & ~0x4000);
8800                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
8801                         }
8802                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
8803                 } else
8804                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
8805
8806                 tg3_writephy(tp, MII_BMCR, val);
8807                 udelay(40);
8808
8809                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8810                            MAC_MODE_LINK_POLARITY;
8811                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8812                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
8813                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8814                 } else
8815                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8816
8817                 /* reset to prevent losing 1st rx packet intermittently */
8818                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8819                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8820                         udelay(10);
8821                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8822                 }
8823                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8824                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8825                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8826                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8827                 }
8828                 tw32(MAC_MODE, mac_mode);
8829         }
8830         else
8831                 return -EINVAL;
8832
8833         err = -EIO;
8834
8835         tx_len = 1514;
8836         skb = netdev_alloc_skb(tp->dev, tx_len);
8837         if (!skb)
8838                 return -ENOMEM;
8839
8840         tx_data = skb_put(skb, tx_len);
8841         memcpy(tx_data, tp->dev->dev_addr, 6);
8842         memset(tx_data + 6, 0x0, 8);
8843
8844         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8845
8846         for (i = 14; i < tx_len; i++)
8847                 tx_data[i] = (u8) (i & 0xff);
8848
8849         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8850
8851         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8852              HOSTCC_MODE_NOW);
8853
8854         udelay(10);
8855
8856         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8857
8858         num_pkts = 0;
8859
8860         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8861
8862         tp->tx_prod++;
8863         num_pkts++;
8864
8865         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8866                      tp->tx_prod);
8867         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8868
8869         udelay(10);
8870
8871         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
8872         for (i = 0; i < 25; i++) {
8873                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8874                        HOSTCC_MODE_NOW);
8875
8876                 udelay(10);
8877
8878                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8879                 rx_idx = tp->hw_status->idx[0].rx_producer;
8880                 if ((tx_idx == tp->tx_prod) &&
8881                     (rx_idx == (rx_start_idx + num_pkts)))
8882                         break;
8883         }
8884
8885         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8886         dev_kfree_skb(skb);
8887
8888         if (tx_idx != tp->tx_prod)
8889                 goto out;
8890
8891         if (rx_idx != rx_start_idx + num_pkts)
8892                 goto out;
8893
8894         desc = &tp->rx_rcb[rx_start_idx];
8895         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8896         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8897         if (opaque_key != RXD_OPAQUE_RING_STD)
8898                 goto out;
8899
8900         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8901             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8902                 goto out;
8903
8904         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8905         if (rx_len != tx_len)
8906                 goto out;
8907
8908         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8909
8910         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8911         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8912
8913         for (i = 14; i < tx_len; i++) {
8914                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8915                         goto out;
8916         }
8917         err = 0;
8918
8919         /* tg3_free_rings will unmap and free the rx_skb */
8920 out:
8921         return err;
8922 }
8923
8924 #define TG3_MAC_LOOPBACK_FAILED         1
8925 #define TG3_PHY_LOOPBACK_FAILED         2
8926 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8927                                          TG3_PHY_LOOPBACK_FAILED)
8928
8929 static int tg3_test_loopback(struct tg3 *tp)
8930 {
8931         int err = 0;
8932
8933         if (!netif_running(tp->dev))
8934                 return TG3_LOOPBACK_FAILED;
8935
8936         err = tg3_reset_hw(tp, 1);
8937         if (err)
8938                 return TG3_LOOPBACK_FAILED;
8939
8940         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8941                 err |= TG3_MAC_LOOPBACK_FAILED;
8942         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8943                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8944                         err |= TG3_PHY_LOOPBACK_FAILED;
8945         }
8946
8947         return err;
8948 }
8949
8950 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8951                           u64 *data)
8952 {
8953         struct tg3 *tp = netdev_priv(dev);
8954
8955         if (tp->link_config.phy_is_low_power)
8956                 tg3_set_power_state(tp, PCI_D0);
8957
8958         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8959
8960         if (tg3_test_nvram(tp) != 0) {
8961                 etest->flags |= ETH_TEST_FL_FAILED;
8962                 data[0] = 1;
8963         }
8964         if (tg3_test_link(tp) != 0) {
8965                 etest->flags |= ETH_TEST_FL_FAILED;
8966                 data[1] = 1;
8967         }
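             /* The offline tests need a quiesced chip: stop the data path,
              * halt the on-chip CPUs, run the register/memory/loopback
              * tests under the full lock, drop the lock for the interrupt
              * test, then reset and restart the interface.
              */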
8968         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8969                 int err, irq_sync = 0;
8970
8971                 if (netif_running(dev)) {
8972                         tg3_netif_stop(tp);
8973                         irq_sync = 1;
8974                 }
8975
8976                 tg3_full_lock(tp, irq_sync);
8977
8978                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8979                 err = tg3_nvram_lock(tp);
8980                 tg3_halt_cpu(tp, RX_CPU_BASE);
8981                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8982                         tg3_halt_cpu(tp, TX_CPU_BASE);
8983                 if (!err)
8984                         tg3_nvram_unlock(tp);
8985
8986                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8987                         tg3_phy_reset(tp);
8988
8989                 if (tg3_test_registers(tp) != 0) {
8990                         etest->flags |= ETH_TEST_FL_FAILED;
8991                         data[2] = 1;
8992                 }
8993                 if (tg3_test_memory(tp) != 0) {
8994                         etest->flags |= ETH_TEST_FL_FAILED;
8995                         data[3] = 1;
8996                 }
8997                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8998                         etest->flags |= ETH_TEST_FL_FAILED;
8999
9000                 tg3_full_unlock(tp);
9001
9002                 if (tg3_test_interrupt(tp) != 0) {
9003                         etest->flags |= ETH_TEST_FL_FAILED;
9004                         data[5] = 1;
9005                 }
9006
9007                 tg3_full_lock(tp, 0);
9008
9009                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9010                 if (netif_running(dev)) {
9011                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9012                         if (!tg3_restart_hw(tp, 1))
9013                                 tg3_netif_start(tp);
9014                 }
9015
9016                 tg3_full_unlock(tp);
9017         }
9018         if (tp->link_config.phy_is_low_power)
9019                 tg3_set_power_state(tp, PCI_D3hot);
9020
9021 }
9022
9023 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9024 {
9025         struct mii_ioctl_data *data = if_mii(ifr);
9026         struct tg3 *tp = netdev_priv(dev);
9027         int err;
9028
9029         switch (cmd) {
9030         case SIOCGMIIPHY:
9031                 data->phy_id = PHY_ADDR;
9032
9033                 /* fallthru */
9034         case SIOCGMIIREG: {
9035                 u32 mii_regval;
9036
9037                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9038                         break;                  /* We have no PHY */
9039
9040                 if (tp->link_config.phy_is_low_power)
9041                         return -EAGAIN;
9042
9043                 spin_lock_bh(&tp->lock);
9044                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9045                 spin_unlock_bh(&tp->lock);
9046
9047                 data->val_out = mii_regval;
9048
9049                 return err;
9050         }
9051
9052         case SIOCSMIIREG:
9053                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9054                         break;                  /* We have no PHY */
9055
9056                 if (!capable(CAP_NET_ADMIN))
9057                         return -EPERM;
9058
9059                 if (tp->link_config.phy_is_low_power)
9060                         return -EAGAIN;
9061
9062                 spin_lock_bh(&tp->lock);
9063                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9064                 spin_unlock_bh(&tp->lock);
9065
9066                 return err;
9067
9068         default:
9069                 /* do nothing */
9070                 break;
9071         }
9072         return -EOPNOTSUPP;
9073 }
9074
9075 #if TG3_VLAN_TAG_USED
9076 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9077 {
9078         struct tg3 *tp = netdev_priv(dev);
9079
9080         if (netif_running(dev))
9081                 tg3_netif_stop(tp);
9082
9083         tg3_full_lock(tp, 0);
9084
9085         tp->vlgrp = grp;
9086
9087         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9088         __tg3_set_rx_mode(dev);
9089
9090         tg3_full_unlock(tp);
9091
9092         if (netif_running(dev))
9093                 tg3_netif_start(tp);
9094 }
9095
9096 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
9097 {
9098         struct tg3 *tp = netdev_priv(dev);
9099
9100         if (netif_running(dev))
9101                 tg3_netif_stop(tp);
9102
9103         tg3_full_lock(tp, 0);
9104         if (tp->vlgrp)
9105                 tp->vlgrp->vlan_devices[vid] = NULL;
9106         tg3_full_unlock(tp);
9107
9108         if (netif_running(dev))
9109                 tg3_netif_start(tp);
9110 }
9111 #endif
9112
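/* ethtool coalescing support.  tg3_get_coalesce() just copies the cached
 * tp->coal; tg3_set_coalesce() below range-checks the request (on 5705 and
 * newer chips the per-IRQ and stats-block intervals must stay zero) and,
 * when the interface is running, pushes the new values to the hardware
 * through __tg3_set_coalesce().
 */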
9113 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9114 {
9115         struct tg3 *tp = netdev_priv(dev);
9116
9117         memcpy(ec, &tp->coal, sizeof(*ec));
9118         return 0;
9119 }
9120
9121 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9122 {
9123         struct tg3 *tp = netdev_priv(dev);
9124         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9125         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9126
9127         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9128                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9129                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9130                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9131                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9132         }
9133
9134         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9135             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9136             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9137             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9138             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9139             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9140             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9141             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9142             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9143             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9144                 return -EINVAL;
9145
9146         /* No rx interrupts will be generated if both are zero */
9147         if ((ec->rx_coalesce_usecs == 0) &&
9148             (ec->rx_max_coalesced_frames == 0))
9149                 return -EINVAL;
9150
9151         /* No tx interrupts will be generated if both are zero */
9152         if ((ec->tx_coalesce_usecs == 0) &&
9153             (ec->tx_max_coalesced_frames == 0))
9154                 return -EINVAL;
9155
9156         /* Only copy relevant parameters, ignore all others. */
9157         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9158         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9159         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9160         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9161         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9162         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9163         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9164         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9165         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9166
9167         if (netif_running(dev)) {
9168                 tg3_full_lock(tp, 0);
9169                 __tg3_set_coalesce(tp, &tp->coal);
9170                 tg3_full_unlock(tp);
9171         }
9172         return 0;
9173 }
9174
9175 static const struct ethtool_ops tg3_ethtool_ops = {
9176         .get_settings           = tg3_get_settings,
9177         .set_settings           = tg3_set_settings,
9178         .get_drvinfo            = tg3_get_drvinfo,
9179         .get_regs_len           = tg3_get_regs_len,
9180         .get_regs               = tg3_get_regs,
9181         .get_wol                = tg3_get_wol,
9182         .set_wol                = tg3_set_wol,
9183         .get_msglevel           = tg3_get_msglevel,
9184         .set_msglevel           = tg3_set_msglevel,
9185         .nway_reset             = tg3_nway_reset,
9186         .get_link               = ethtool_op_get_link,
9187         .get_eeprom_len         = tg3_get_eeprom_len,
9188         .get_eeprom             = tg3_get_eeprom,
9189         .set_eeprom             = tg3_set_eeprom,
9190         .get_ringparam          = tg3_get_ringparam,
9191         .set_ringparam          = tg3_set_ringparam,
9192         .get_pauseparam         = tg3_get_pauseparam,
9193         .set_pauseparam         = tg3_set_pauseparam,
9194         .get_rx_csum            = tg3_get_rx_csum,
9195         .set_rx_csum            = tg3_set_rx_csum,
9196         .get_tx_csum            = ethtool_op_get_tx_csum,
9197         .set_tx_csum            = tg3_set_tx_csum,
9198         .get_sg                 = ethtool_op_get_sg,
9199         .set_sg                 = ethtool_op_set_sg,
9200         .get_tso                = ethtool_op_get_tso,
9201         .set_tso                = tg3_set_tso,
9202         .self_test_count        = tg3_get_test_count,
9203         .self_test              = tg3_self_test,
9204         .get_strings            = tg3_get_strings,
9205         .phys_id                = tg3_phys_id,
9206         .get_stats_count        = tg3_get_stats_count,
9207         .get_ethtool_stats      = tg3_get_ethtool_stats,
9208         .get_coalesce           = tg3_get_coalesce,
9209         .set_coalesce           = tg3_set_coalesce,
9210         .get_perm_addr          = ethtool_op_get_perm_addr,
9211 };
9212
9213 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9214 {
9215         u32 cursize, val, magic;
9216
9217         tp->nvram_size = EEPROM_CHIP_SIZE;
9218
9219         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9220                 return;
9221
9222         if ((magic != TG3_EEPROM_MAGIC) &&
9223             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9224             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9225                 return;
9226
9227         /*
9228          * Size the chip by reading offsets at increasing powers of two.
9229          * When we encounter our validation signature, we know the addressing
9230          * has wrapped around, and thus have our chip size.
9231          */
9232         cursize = 0x10;
9233
9234         while (cursize < tp->nvram_size) {
9235                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9236                         return;
9237
9238                 if (val == magic)
9239                         break;
9240
9241                 cursize <<= 1;
9242         }
9243
9244         tp->nvram_size = cursize;
9245 }
9246
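/* Determine the NVRAM size.  Selfboot images (no standard magic) are sized
 * by probing in tg3_get_eeprom_size(); standard images carry their size in
 * KB in the upper half of the word at offset 0xf0, with 128KB assumed when
 * that word is zero.
 */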
9247 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9248 {
9249         u32 val;
9250
9251         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9252                 return;
9253
9254         /* Selfboot format */
9255         if (val != TG3_EEPROM_MAGIC) {
9256                 tg3_get_eeprom_size(tp);
9257                 return;
9258         }
9259
9260         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9261                 if (val != 0) {
9262                         tp->nvram_size = (val >> 16) * 1024;
9263                         return;
9264                 }
9265         }
9266         tp->nvram_size = 0x20000;
9267 }
9268
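/* Decode NVRAM_CFG1 for chips without one of the dedicated handlers below:
 * note whether a flash interface is present and, on 5750/5780-class parts,
 * map the vendor strapping bits to a JEDEC id, page size and buffered
 * flag.  Everything else defaults to a buffered Atmel AT45DB0X1B.
 */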
9269 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9270 {
9271         u32 nvcfg1;
9272
9273         nvcfg1 = tr32(NVRAM_CFG1);
9274         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9275                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9276         }
9277         else {
9278                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9279                 tw32(NVRAM_CFG1, nvcfg1);
9280         }
9281
9282         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9283             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9284                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9285                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9286                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9287                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9288                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9289                                 break;
9290                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9291                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9292                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9293                                 break;
9294                         case FLASH_VENDOR_ATMEL_EEPROM:
9295                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9296                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9297                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9298                                 break;
9299                         case FLASH_VENDOR_ST:
9300                                 tp->nvram_jedecnum = JEDEC_ST;
9301                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9302                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9303                                 break;
9304                         case FLASH_VENDOR_SAIFUN:
9305                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9306                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9307                                 break;
9308                         case FLASH_VENDOR_SST_SMALL:
9309                         case FLASH_VENDOR_SST_LARGE:
9310                                 tp->nvram_jedecnum = JEDEC_SST;
9311                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9312                                 break;
9313                 }
9314         }
9315         else {
9316                 tp->nvram_jedecnum = JEDEC_ATMEL;
9317                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9318                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9319         }
9320 }
9321
9322 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9323 {
9324         u32 nvcfg1;
9325
9326         nvcfg1 = tr32(NVRAM_CFG1);
9327
9328         /* NVRAM protection for TPM */
9329         if (nvcfg1 & (1 << 27))
9330                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9331
9332         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9333                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9334                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9335                         tp->nvram_jedecnum = JEDEC_ATMEL;
9336                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9337                         break;
9338                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9339                         tp->nvram_jedecnum = JEDEC_ATMEL;
9340                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9341                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9342                         break;
9343                 case FLASH_5752VENDOR_ST_M45PE10:
9344                 case FLASH_5752VENDOR_ST_M45PE20:
9345                 case FLASH_5752VENDOR_ST_M45PE40:
9346                         tp->nvram_jedecnum = JEDEC_ST;
9347                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9348                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9349                         break;
9350         }
9351
9352         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9353                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9354                         case FLASH_5752PAGE_SIZE_256:
9355                                 tp->nvram_pagesize = 256;
9356                                 break;
9357                         case FLASH_5752PAGE_SIZE_512:
9358                                 tp->nvram_pagesize = 512;
9359                                 break;
9360                         case FLASH_5752PAGE_SIZE_1K:
9361                                 tp->nvram_pagesize = 1024;
9362                                 break;
9363                         case FLASH_5752PAGE_SIZE_2K:
9364                                 tp->nvram_pagesize = 2048;
9365                                 break;
9366                         case FLASH_5752PAGE_SIZE_4K:
9367                                 tp->nvram_pagesize = 4096;
9368                                 break;
9369                         case FLASH_5752PAGE_SIZE_264:
9370                                 tp->nvram_pagesize = 264;
9371                                 break;
9372                 }
9373         }
9374         else {
9375                 /* For eeprom, set pagesize to maximum eeprom size */
9376                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9377
9378                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9379                 tw32(NVRAM_CFG1, nvcfg1);
9380         }
9381 }
9382
9383 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9384 {
9385         u32 nvcfg1;
9386
9387         nvcfg1 = tr32(NVRAM_CFG1);
9388
9389         /* NVRAM protection for TPM */
9390         if (nvcfg1 & (1 << 27))
9391                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9392
9393         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9394                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9395                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9396                         tp->nvram_jedecnum = JEDEC_ATMEL;
9397                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9398                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9399
9400                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9401                         tw32(NVRAM_CFG1, nvcfg1);
9402                         break;
9403                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9404                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9405                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9406                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9407                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9408                         tp->nvram_jedecnum = JEDEC_ATMEL;
9409                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9410                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9411                         tp->nvram_pagesize = 264;
9412                         break;
9413                 case FLASH_5752VENDOR_ST_M45PE10:
9414                 case FLASH_5752VENDOR_ST_M45PE20:
9415                 case FLASH_5752VENDOR_ST_M45PE40:
9416                         tp->nvram_jedecnum = JEDEC_ST;
9417                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9418                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9419                         tp->nvram_pagesize = 256;
9420                         break;
9421         }
9422 }
9423
9424 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9425 {
9426         u32 nvcfg1;
9427
9428         nvcfg1 = tr32(NVRAM_CFG1);
9429
9430         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9431                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9432                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9433                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9434                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9435                         tp->nvram_jedecnum = JEDEC_ATMEL;
9436                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9437                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9438
9439                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9440                         tw32(NVRAM_CFG1, nvcfg1);
9441                         break;
9442                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9443                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9444                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9445                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9446                         tp->nvram_jedecnum = JEDEC_ATMEL;
9447                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9448                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9449                         tp->nvram_pagesize = 264;
9450                         break;
9451                 case FLASH_5752VENDOR_ST_M45PE10:
9452                 case FLASH_5752VENDOR_ST_M45PE20:
9453                 case FLASH_5752VENDOR_ST_M45PE40:
9454                         tp->nvram_jedecnum = JEDEC_ST;
9455                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9456                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9457                         tp->nvram_pagesize = 256;
9458                         break;
9459         }
9460 }
9461
9462 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9463 {
9464         tp->nvram_jedecnum = JEDEC_ATMEL;
9465         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9466         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9467 }
9468
9469 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9470 static void __devinit tg3_nvram_init(struct tg3 *tp)
9471 {
9472         tw32_f(GRC_EEPROM_ADDR,
9473              (EEPROM_ADDR_FSM_RESET |
9474               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9475                EEPROM_ADDR_CLKPERD_SHIFT)));
9476
9477         msleep(1);
9478
9479         /* Enable seeprom accesses. */
9480         tw32_f(GRC_LOCAL_CTRL,
9481              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9482         udelay(100);
9483
9484         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9485             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9486                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9487
9488                 if (tg3_nvram_lock(tp)) {
9489                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9490                                "tg3_nvram_init failed.\n", tp->dev->name);
9491                         return;
9492                 }
9493                 tg3_enable_nvram_access(tp);
9494
9495                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9496                         tg3_get_5752_nvram_info(tp);
9497                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9498                         tg3_get_5755_nvram_info(tp);
9499                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9500                         tg3_get_5787_nvram_info(tp);
9501                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9502                         tg3_get_5906_nvram_info(tp);
9503                 else
9504                         tg3_get_nvram_info(tp);
9505
9506                 tg3_get_nvram_size(tp);
9507
9508                 tg3_disable_nvram_access(tp);
9509                 tg3_nvram_unlock(tp);
9510
9511         } else {
9512                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9513
9514                 tg3_get_eeprom_size(tp);
9515         }
9516 }
9517
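/* Read one 32-bit word through the legacy serial-EEPROM interface: program
 * GRC_EEPROM_ADDR with the offset plus READ/START, poll for
 * EEPROM_ADDR_COMPLETE (up to 1000 x 1ms), then fetch the word from
 * GRC_EEPROM_DATA.
 */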
9518 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9519                                         u32 offset, u32 *val)
9520 {
9521         u32 tmp;
9522         int i;
9523
9524         if (offset > EEPROM_ADDR_ADDR_MASK ||
9525             (offset % 4) != 0)
9526                 return -EINVAL;
9527
9528         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9529                                         EEPROM_ADDR_DEVID_MASK |
9530                                         EEPROM_ADDR_READ);
9531         tw32(GRC_EEPROM_ADDR,
9532              tmp |
9533              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9534              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9535               EEPROM_ADDR_ADDR_MASK) |
9536              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9537
9538         for (i = 0; i < 1000; i++) {
9539                 tmp = tr32(GRC_EEPROM_ADDR);
9540
9541                 if (tmp & EEPROM_ADDR_COMPLETE)
9542                         break;
9543                 msleep(1);
9544         }
9545         if (!(tmp & EEPROM_ADDR_COMPLETE))
9546                 return -EBUSY;
9547
9548         *val = tr32(GRC_EEPROM_DATA);
9549         return 0;
9550 }
9551
9552 #define NVRAM_CMD_TIMEOUT 10000
9553
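/* Kick a command into the NVRAM state machine and busy-wait for
 * NVRAM_CMD_DONE.  At 10us per poll and NVRAM_CMD_TIMEOUT (10000)
 * iterations this gives up after roughly 100ms with -EBUSY.
 */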
9554 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9555 {
9556         int i;
9557
9558         tw32(NVRAM_CMD, nvram_cmd);
9559         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9560                 udelay(10);
9561                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9562                         udelay(10);
9563                         break;
9564                 }
9565         }
9566         if (i == NVRAM_CMD_TIMEOUT) {
9567                 return -EBUSY;
9568         }
9569         return 0;
9570 }
9571
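/* Buffered Atmel AT45DB0X1B flash is addressed by page number and offset
 * within the page rather than linearly.  tg3_nvram_phys_addr() converts a
 * linear offset into that form (page index shifted to
 * ATMEL_AT45DB0X1B_PAGE_POS plus the in-page offset), and
 * tg3_nvram_logical_addr() performs the inverse mapping.  All other NVRAM
 * configurations pass addresses through unchanged.
 */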
9572 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9573 {
9574         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9575             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9576             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9577             (tp->nvram_jedecnum == JEDEC_ATMEL))
9578
9579                 addr = ((addr / tp->nvram_pagesize) <<
9580                         ATMEL_AT45DB0X1B_PAGE_POS) +
9581                        (addr % tp->nvram_pagesize);
9582
9583         return addr;
9584 }
9585
9586 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9587 {
9588         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9589             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9590             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9591             (tp->nvram_jedecnum == JEDEC_ATMEL))
9592
9593                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9594                         tp->nvram_pagesize) +
9595                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9596
9597         return addr;
9598 }
9599
9600 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9601 {
9602         int ret;
9603
9604         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9605                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9606
9607         offset = tg3_nvram_phys_addr(tp, offset);
9608
9609         if (offset > NVRAM_ADDR_MSK)
9610                 return -EINVAL;
9611
9612         ret = tg3_nvram_lock(tp);
9613         if (ret)
9614                 return ret;
9615
9616         tg3_enable_nvram_access(tp);
9617
9618         tw32(NVRAM_ADDR, offset);
9619         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9620                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9621
9622         if (ret == 0)
9623                 *val = swab32(tr32(NVRAM_RDDATA));
9624
9625         tg3_disable_nvram_access(tp);
9626
9627         tg3_nvram_unlock(tp);
9628
9629         return ret;
9630 }
9631
9632 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9633 {
9634         int err;
9635         u32 tmp;
9636
9637         err = tg3_nvram_read(tp, offset, &tmp);
9638         *val = swab32(tmp);
9639         return err;
9640 }
9641
9642 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9643                                     u32 offset, u32 len, u8 *buf)
9644 {
9645         int i, j, rc = 0;
9646         u32 val;
9647
9648         for (i = 0; i < len; i += 4) {
9649                 u32 addr, data;
9650
9651                 addr = offset + i;
9652
9653                 memcpy(&data, buf + i, 4);
9654
9655                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9656
9657                 val = tr32(GRC_EEPROM_ADDR);
9658                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9659
9660                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9661                         EEPROM_ADDR_READ);
9662                 tw32(GRC_EEPROM_ADDR, val |
9663                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9664                         (addr & EEPROM_ADDR_ADDR_MASK) |
9665                         EEPROM_ADDR_START |
9666                         EEPROM_ADDR_WRITE);
9667
9668                 for (j = 0; j < 1000; j++) {
9669                         val = tr32(GRC_EEPROM_ADDR);
9670
9671                         if (val & EEPROM_ADDR_COMPLETE)
9672                                 break;
9673                         msleep(1);
9674                 }
9675                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9676                         rc = -EBUSY;
9677                         break;
9678                 }
9679         }
9680
9681         return rc;
9682 }
9683
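/* Unbuffered flash has to be rewritten one full page at a time: read the
 * page into a scratch buffer, merge in the caller's data, then issue
 * write-enable, page-erase and write-enable again before programming the
 * page back four bytes per command with FIRST/LAST framing.  A final
 * write-disable command closes the sequence.
 */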
9684 /* offset and length are dword aligned */
9685 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9686                 u8 *buf)
9687 {
9688         int ret = 0;
9689         u32 pagesize = tp->nvram_pagesize;
9690         u32 pagemask = pagesize - 1;
9691         u32 nvram_cmd;
9692         u8 *tmp;
9693
9694         tmp = kmalloc(pagesize, GFP_KERNEL);
9695         if (tmp == NULL)
9696                 return -ENOMEM;
9697
9698         while (len) {
9699                 int j;
9700                 u32 phy_addr, page_off, size;
9701
9702                 phy_addr = offset & ~pagemask;
9703
9704                 for (j = 0; j < pagesize; j += 4) {
9705                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9706                                                 (u32 *) (tmp + j))))
9707                                 break;
9708                 }
9709                 if (ret)
9710                         break;
9711
9712                 page_off = offset & pagemask;
9713                 size = pagesize;
9714                 if (len < size)
9715                         size = len;
9716
9717                 len -= size;
9718
9719                 memcpy(tmp + page_off, buf, size);
9720
9721                 offset = offset + (pagesize - page_off);
9722
9723                 tg3_enable_nvram_access(tp);
9724
9725                 /*
9726                  * Before we can erase the flash page, we need
9727                  * to issue a special "write enable" command.
9728                  */
9729                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9730
9731                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9732                         break;
9733
9734                 /* Erase the target page */
9735                 tw32(NVRAM_ADDR, phy_addr);
9736
9737                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9738                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9739
9740                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9741                         break;
9742
9743                 /* Issue another write enable to start the write. */
9744                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9745
9746                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9747                         break;
9748
9749                 for (j = 0; j < pagesize; j += 4) {
9750                         u32 data;
9751
9752                         data = *((u32 *) (tmp + j));
9753                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9754
9755                         tw32(NVRAM_ADDR, phy_addr + j);
9756
9757                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9758                                 NVRAM_CMD_WR;
9759
9760                         if (j == 0)
9761                                 nvram_cmd |= NVRAM_CMD_FIRST;
9762                         else if (j == (pagesize - 4))
9763                                 nvram_cmd |= NVRAM_CMD_LAST;
9764
9765                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9766                                 break;
9767                 }
9768                 if (ret)
9769                         break;
9770         }
9771
9772         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9773         tg3_nvram_exec_cmd(tp, nvram_cmd);
9774
9775         kfree(tmp);
9776
9777         return ret;
9778 }
9779
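/* Buffered flash and EEPROM parts are programmed one dword per command:
 * NVRAM_CMD_FIRST is set at the start of the transfer and on page
 * boundaries, NVRAM_CMD_LAST at the end of a page or of the buffer, and ST
 * parts (other than on the 5752/5755/5787) get a write-enable command
 * before each FIRST.
 */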
9780 /* offset and length are dword aligned */
9781 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9782                 u8 *buf)
9783 {
9784         int i, ret = 0;
9785
9786         for (i = 0; i < len; i += 4, offset += 4) {
9787                 u32 data, page_off, phy_addr, nvram_cmd;
9788
9789                 memcpy(&data, buf + i, 4);
9790                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9791
9792                 page_off = offset % tp->nvram_pagesize;
9793
9794                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9795
9796                 tw32(NVRAM_ADDR, phy_addr);
9797
9798                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9799
9800                 if ((page_off == 0) || (i == 0))
9801                         nvram_cmd |= NVRAM_CMD_FIRST;
9802                 if (page_off == (tp->nvram_pagesize - 4))
9803                         nvram_cmd |= NVRAM_CMD_LAST;
9804
9805                 if (i == (len - 4))
9806                         nvram_cmd |= NVRAM_CMD_LAST;
9807
9808                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9809                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9810                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9811                     (tp->nvram_jedecnum == JEDEC_ST) &&
9812                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9813
9814                         if ((ret = tg3_nvram_exec_cmd(tp,
9815                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9816                                 NVRAM_CMD_DONE)))
9817
9818                                 break;
9819                 }
9820                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9821                         /* We always do complete word writes to eeprom. */
9822                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9823                 }
9824
9825                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9826                         break;
9827         }
9828         return ret;
9829 }
9830
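/* Top-level NVRAM write: temporarily lift the GPIO-driven EEPROM write
 * protect and dispatch to the eeprom, buffered-flash or unbuffered-flash
 * path (the flash paths run with NVRAM writes enabled in GRC_MODE), then
 * restore the original protection.
 */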
9831 /* offset and length are dword aligned */
9832 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9833 {
9834         int ret;
9835
9836         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9837                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9838                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9839                 udelay(40);
9840         }
9841
9842         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9843                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9844         }
9845         else {
9846                 u32 grc_mode;
9847
9848                 ret = tg3_nvram_lock(tp);
9849                 if (ret)
9850                         return ret;
9851
9852                 tg3_enable_nvram_access(tp);
9853                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9854                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9855                         tw32(NVRAM_WRITE1, 0x406);
9856
9857                 grc_mode = tr32(GRC_MODE);
9858                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9859
9860                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9861                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9862
9863                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9864                                 buf);
9865                 }
9866                 else {
9867                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9868                                 buf);
9869                 }
9870
9871                 grc_mode = tr32(GRC_MODE);
9872                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9873
9874                 tg3_disable_nvram_access(tp);
9875                 tg3_nvram_unlock(tp);
9876         }
9877
9878         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9879                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9880                 udelay(40);
9881         }
9882
9883         return ret;
9884 }
9885
9886 struct subsys_tbl_ent {
9887         u16 subsys_vendor, subsys_devid;
9888         u32 phy_id;
9889 };
9890
9891 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9892         /* Broadcom boards. */
9893         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9894         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9895         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9896         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9897         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9898         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9899         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9900         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9901         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9902         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9903         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9904
9905         /* 3com boards. */
9906         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9907         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9908         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9909         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9910         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9911
9912         /* DELL boards. */
9913         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9914         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9915         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9916         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9917
9918         /* Compaq boards. */
9919         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9920         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9921         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9922         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9923         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9924
9925         /* IBM boards. */
9926         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9927 };
9928
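/* Match the PCI subsystem vendor/device pair against the table above; used
 * to recover the PHY type on boards whose NVRAM carries no usable
 * signature.
 */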
9929 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9930 {
9931         int i;
9932
9933         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9934                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9935                      tp->pdev->subsystem_vendor) &&
9936                     (subsys_id_to_phy_id[i].subsys_devid ==
9937                      tp->pdev->subsystem_device))
9938                         return &subsys_id_to_phy_id[i];
9939         }
9940         return NULL;
9941 }
9942
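/* Pull the bootcode-provided configuration (PHY id, LED mode, write
 * protect, WoL and ASF flags) out of NIC shared SRAM.  The chip has to be
 * in D0 and the memory arbiter enabled before the SRAM can be read.
 */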
9943 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9944 {
9945         u32 val;
9946         u16 pmcsr;
9947
9948         /* On some early chips the SRAM cannot be accessed in D3hot state,
9949          * so we need to make sure we're in D0.
9950          */
9951         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9952         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9953         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9954         msleep(1);
9955
9956         /* Make sure register accesses (indirect or otherwise)
9957          * will function correctly.
9958          */
9959         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9960                                tp->misc_host_ctrl);
9961
9962         /* The memory arbiter has to be enabled in order for SRAM accesses
9963          * to succeed.  Normally on powerup the tg3 chip firmware will make
9964          * sure it is enabled, but other entities such as system netboot
9965          * code might disable it.
9966          */
9967         val = tr32(MEMARB_MODE);
9968         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9969
9970         tp->phy_id = PHY_ID_INVALID;
9971         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9972
9973         /* Assume an onboard device by default.  */
9974         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9975
9976         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9977                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
9978                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9979                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
9980                 }
9981                 return;
9982         }
9983
9984         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9985         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9986                 u32 nic_cfg, led_cfg;
9987                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9988                 int eeprom_phy_serdes = 0;
9989
9990                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9991                 tp->nic_sram_data_cfg = nic_cfg;
9992
9993                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9994                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9995                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9996                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9997                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9998                     (ver > 0) && (ver < 0x100))
9999                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10000
10001                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10002                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10003                         eeprom_phy_serdes = 1;
10004
10005                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10006                 if (nic_phy_id != 0) {
10007                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10008                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10009
10010                         eeprom_phy_id  = (id1 >> 16) << 10;
10011                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10012                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10013                 } else
10014                         eeprom_phy_id = 0;
10015
10016                 tp->phy_id = eeprom_phy_id;
10017                 if (eeprom_phy_serdes) {
10018                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10019                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10020                         else
10021                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10022                 }
10023
10024                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10025                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10026                                     SHASTA_EXT_LED_MODE_MASK);
10027                 else
10028                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10029
10030                 switch (led_cfg) {
10031                 default:
10032                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10033                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10034                         break;
10035
10036                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10037                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10038                         break;
10039
10040                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10041                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10042
10043                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10044                          * read from some older 5700/5701 bootcode.
10045                          */
10046                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10047                             ASIC_REV_5700 ||
10048                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10049                             ASIC_REV_5701)
10050                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10051
10052                         break;
10053
10054                 case SHASTA_EXT_LED_SHARED:
10055                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10056                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10057                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10058                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10059                                                  LED_CTRL_MODE_PHY_2);
10060                         break;
10061
10062                 case SHASTA_EXT_LED_MAC:
10063                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10064                         break;
10065
10066                 case SHASTA_EXT_LED_COMBO:
10067                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10068                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10069                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10070                                                  LED_CTRL_MODE_PHY_2);
10071                         break;
10072
10073                 }
10074
10075                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10076                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10077                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10078                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10079
10080                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10081                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10082                         if ((tp->pdev->subsystem_vendor ==
10083                              PCI_VENDOR_ID_ARIMA) &&
10084                             (tp->pdev->subsystem_device == 0x205a ||
10085                              tp->pdev->subsystem_device == 0x2063))
10086                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10087                 } else {
10088                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10089                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10090                 }
10091
10092                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10093                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10094                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10095                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10096                 }
10097                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
10098                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
10099
10100                 if (cfg2 & (1 << 17))
10101                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10102
10103                 /* serdes signal pre-emphasis in register 0x590 is set by
10104                  * the bootcode if bit 18 is set. */
10105                 if (cfg2 & (1 << 18))
10106                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10107         }
10108 }
10109
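/* Identify the PHY.  When ASF firmware owns the PHY the MII ID registers
 * are left alone; otherwise the id is read from MII_PHYSID1/2 and, if it
 * is not a known one, taken from the eeprom value set up earlier or from
 * the subsystem-id table.  On non-ASF copper devices without link, the PHY
 * is then reset and a default autonegotiation advertisement is programmed.
 */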
10110 static int __devinit tg3_phy_probe(struct tg3 *tp)
10111 {
10112         u32 hw_phy_id_1, hw_phy_id_2;
10113         u32 hw_phy_id, hw_phy_id_masked;
10114         int err;
10115
10116         /* Reading the PHY ID register can conflict with ASF
10117          * firmware access to the PHY hardware.
10118          */
10119         err = 0;
10120         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
10121                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10122         } else {
10123                 /* Now read the physical PHY_ID from the chip and verify
10124                  * that it is sane.  If it doesn't look good, we fall back
10125                  * to the PHY_ID found in the eeprom area and, failing
10126                  * that, to the hard-coded subsystem-id table.
10127                  */
10128                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10129                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10130
10131                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
10132                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10133                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
10134
10135                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10136         }
10137
10138         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10139                 tp->phy_id = hw_phy_id;
10140                 if (hw_phy_id_masked == PHY_ID_BCM8002)
10141                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10142                 else
10143                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10144         } else {
10145                 if (tp->phy_id != PHY_ID_INVALID) {
10146                         /* Do nothing, phy ID already set up in
10147                          * tg3_get_eeprom_hw_cfg().
10148                          */
10149                 } else {
10150                         struct subsys_tbl_ent *p;
10151
10152                         /* No eeprom signature?  Try the hardcoded
10153                          * subsys device table.
10154                          */
10155                         p = lookup_by_subsys(tp);
10156                         if (!p)
10157                                 return -ENODEV;
10158
10159                         tp->phy_id = p->phy_id;
10160                         if (!tp->phy_id ||
10161                             tp->phy_id == PHY_ID_BCM8002)
10162                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10163                 }
10164         }
10165
10166         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10167             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10168                 u32 bmsr, adv_reg, tg3_ctrl, mask;
10169
10170                 tg3_readphy(tp, MII_BMSR, &bmsr);
10171                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10172                     (bmsr & BMSR_LSTATUS))
10173                         goto skip_phy_reset;
10174
10175                 err = tg3_phy_reset(tp);
10176                 if (err)
10177                         return err;
10178
10179                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10180                            ADVERTISE_100HALF | ADVERTISE_100FULL |
10181                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10182                 tg3_ctrl = 0;
10183                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10184                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10185                                     MII_TG3_CTRL_ADV_1000_FULL);
10186                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10187                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10188                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10189                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
10190                 }
10191
10192                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10193                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10194                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
10195                 if (!tg3_copper_is_advertising_all(tp, mask)) {
10196                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10197
10198                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10199                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10200
10201                         tg3_writephy(tp, MII_BMCR,
10202                                      BMCR_ANENABLE | BMCR_ANRESTART);
10203                 }
10204                 tg3_phy_set_wirespeed(tp);
10205
10206                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10207                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10208                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10209         }
10210
10211 skip_phy_reset:
10212         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10213                 err = tg3_init_5401phy_dsp(tp);
10214                 if (err)
10215                         return err;
10216         }
10217
10218         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10219                 err = tg3_init_5401phy_dsp(tp);
10220         }
10221
10222         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10223                 tp->link_config.advertising =
10224                         (ADVERTISED_1000baseT_Half |
10225                          ADVERTISED_1000baseT_Full |
10226                          ADVERTISED_Autoneg |
10227                          ADVERTISED_FIBRE);
10228         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10229                 tp->link_config.advertising &=
10230                         ~(ADVERTISED_1000baseT_Half |
10231                           ADVERTISED_1000baseT_Full);
10232
10233         return err;
10234 }
10235
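/* Read the board part number from VPD: 256 bytes are fetched either from
 * NVRAM offset 0x100 (standard images) or through the PCI VPD capability,
 * and the read-only section is scanned for the "PN" keyword.  A fixed
 * fallback string is used if nothing is found.
 */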
10236 static void __devinit tg3_read_partno(struct tg3 *tp)
10237 {
10238         unsigned char vpd_data[256];
10239         unsigned int i;
10240         u32 magic;
10241
10242         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10243                 goto out_not_found;
10244
10245         if (magic == TG3_EEPROM_MAGIC) {
10246                 for (i = 0; i < 256; i += 4) {
10247                         u32 tmp;
10248
10249                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10250                                 goto out_not_found;
10251
10252                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10253                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10254                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10255                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10256                 }
10257         } else {
10258                 int vpd_cap;
10259
10260                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10261                 for (i = 0; i < 256; i += 4) {
10262                         u32 tmp, j = 0;
10263                         u16 tmp16;
10264
10265                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10266                                               i);
10267                         while (j++ < 100) {
10268                                 pci_read_config_word(tp->pdev, vpd_cap +
10269                                                      PCI_VPD_ADDR, &tmp16);
10270                                 if (tmp16 & 0x8000)
10271                                         break;
10272                                 msleep(1);
10273                         }
10274                         if (!(tmp16 & 0x8000))
10275                                 goto out_not_found;
10276
10277                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10278                                               &tmp);
10279                         tmp = cpu_to_le32(tmp);
10280                         memcpy(&vpd_data[i], &tmp, 4);
10281                 }
10282         }
10283
10284         /* Now parse and find the part number. */
10285         for (i = 0; i < 254; ) {
10286                 unsigned char val = vpd_data[i];
10287                 unsigned int block_end;
10288
10289                 if (val == 0x82 || val == 0x91) {
10290                         i = (i + 3 +
10291                              (vpd_data[i + 1] +
10292                               (vpd_data[i + 2] << 8)));
10293                         continue;
10294                 }
10295
10296                 if (val != 0x90)
10297                         goto out_not_found;
10298
10299                 block_end = (i + 3 +
10300                              (vpd_data[i + 1] +
10301                               (vpd_data[i + 2] << 8)));
10302                 i += 3;
10303
10304                 if (block_end > 256)
10305                         goto out_not_found;
10306
10307                 while (i < (block_end - 2)) {
10308                         if (vpd_data[i + 0] == 'P' &&
10309                             vpd_data[i + 1] == 'N') {
10310                                 int partno_len = vpd_data[i + 2];
10311
10312                                 i += 3;
10313                                 if (partno_len > 24 || (partno_len + i) > 256)
10314                                         goto out_not_found;
10315
10316                                 memcpy(tp->board_part_number,
10317                                        &vpd_data[i], partno_len);
10318
10319                                 /* Success. */
10320                                 return;
10321                         }
10322                         i += 3 + vpd_data[i + 2];
10323                 }
10324
10325                 /* Part number not found. */
10326                 goto out_not_found;
10327         }
10328
10329 out_not_found:
10330         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10331                 strcpy(tp->board_part_number, "BCM95906");
10332         else
10333                 strcpy(tp->board_part_number, "none");
10334 }
10335
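/* Extract the bootcode firmware version string from a standard NVRAM
 * image: the words at offsets 0x4 and 0xc locate the version text, 16
 * bytes of which are copied into tp->fw_ver.
 */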
10336 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10337 {
10338         u32 val, offset, start;
10339
10340         if (tg3_nvram_read_swab(tp, 0, &val))
10341                 return;
10342
10343         if (val != TG3_EEPROM_MAGIC)
10344                 return;
10345
10346         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10347             tg3_nvram_read_swab(tp, 0x4, &start))
10348                 return;
10349
10350         offset = tg3_nvram_logical_addr(tp, offset);
10351         if (tg3_nvram_read_swab(tp, offset, &val))
10352                 return;
10353
10354         if ((val & 0xfc000000) == 0x0c000000) {
10355                 u32 ver_offset, addr;
10356                 int i;
10357
10358                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10359                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10360                         return;
10361
10362                 if (val != 0)
10363                         return;
10364
10365                 addr = offset + ver_offset - start;
10366                 for (i = 0; i < 16; i += 4) {
10367                         if (tg3_nvram_read(tp, addr + i, &val))
10368                                 return;
10369
10370                         val = cpu_to_le32(val);
10371                         memcpy(tp->fw_ver + i, &val, 4);
10372                 }
10373         }
10374 }
10375
10376 static int __devinit tg3_get_invariants(struct tg3 *tp)
10377 {
10378         static struct pci_device_id write_reorder_chipsets[] = {
10379                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10380                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10381                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10382                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10383                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10384                              PCI_DEVICE_ID_VIA_8385_0) },
10385                 { },
10386         };
10387         u32 misc_ctrl_reg;
10388         u32 cacheline_sz_reg;
10389         u32 pci_state_reg, grc_misc_cfg;
10390         u32 val;
10391         u16 pci_cmd;
10392         int err, pcie_cap;
10393
10394         /* Force memory write invalidate off.  If we leave it on,
10395          * then on 5700_BX chips we have to enable a workaround.
10396          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10397          * to match the cacheline size.  The Broadcom driver has this
10398          * workaround but turns MWI off all the time and so never uses
10399          * it.  This seems to suggest that the workaround is insufficient.
10400          */
10401         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10402         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10403         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10404
10405         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10406          * has the register indirect write enable bit set before
10407          * we try to access any of the MMIO registers.  It is also
10408          * critical that the PCI-X hw workaround situation is decided
10409          * before that as well.
10410          */
10411         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10412                               &misc_ctrl_reg);
10413
10414         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10415                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10416
10417         /* Wrong chip ID in 5752 A0. This code can be removed later
10418          * as A0 is not in production.
10419          */
10420         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10421                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10422
10423         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10424          * we need to disable memory and use config. cycles
10425          * only to access all registers. The 5702/03 chips
10426          * can mistakenly decode the special cycles from the
10427          * ICH chipsets as memory write cycles, causing corruption
10428          * of register and memory space. Only certain ICH bridges
10429          * will drive special cycles with non-zero data during the
10430          * address phase which can fall within the 5703's address
10431          * range. This is not an ICH bug as the PCI spec allows
10432          * non-zero address during special cycles. However, only
10433          * these ICH bridges are known to drive non-zero addresses
10434          * during special cycles.
10435          *
10436          * Since special cycles do not cross PCI bridges, we only
10437          * enable this workaround if the 5703 is on the secondary
10438          * bus of these ICH bridges.
10439          */
10440         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10441             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10442                 static struct tg3_dev_id {
10443                         u32     vendor;
10444                         u32     device;
10445                         u32     rev;
10446                 } ich_chipsets[] = {
10447                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10448                           PCI_ANY_ID },
10449                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10450                           PCI_ANY_ID },
10451                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10452                           0xa },
10453                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10454                           PCI_ANY_ID },
10455                         { },
10456                 };
10457                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10458                 struct pci_dev *bridge = NULL;
10459
10460                 while (pci_id->vendor != 0) {
10461                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10462                                                 bridge);
10463                         if (!bridge) {
10464                                 pci_id++;
10465                                 continue;
10466                         }
10467                         if (pci_id->rev != PCI_ANY_ID) {
10468                                 u8 rev;
10469
10470                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10471                                                      &rev);
10472                                 if (rev > pci_id->rev)
10473                                         continue;
10474                         }
10475                         if (bridge->subordinate &&
10476                             (bridge->subordinate->number ==
10477                              tp->pdev->bus->number)) {
10478
10479                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10480                                 pci_dev_put(bridge);
10481                                 break;
10482                         }
10483                 }
10484         }
10485
10486         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10487          * DMA addresses > 40-bit.  This bridge may have additional
10488          * 57xx devices behind it, in some 4-port NIC designs for example.
10489          * Any tg3 device found behind the bridge will also need the 40-bit
10490          * DMA workaround.
10491          */
10492         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10493             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10494                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10495                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10496                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10497         }
10498         else {
10499                 struct pci_dev *bridge = NULL;
10500
10501                 do {
10502                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10503                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10504                                                 bridge);
10505                         if (bridge && bridge->subordinate &&
10506                             (bridge->subordinate->number <=
10507                              tp->pdev->bus->number) &&
10508                             (bridge->subordinate->subordinate >=
10509                              tp->pdev->bus->number)) {
10510                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10511                                 pci_dev_put(bridge);
10512                                 break;
10513                         }
10514                 } while (bridge);
10515         }
10516
10517         /* Initialize misc host control in PCI block. */
10518         tp->misc_host_ctrl |= (misc_ctrl_reg &
10519                                MISC_HOST_CTRL_CHIPREV);
10520         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10521                                tp->misc_host_ctrl);
10522
10523         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10524                               &cacheline_sz_reg);
10525
10526         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10527         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10528         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10529         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10530
10531         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10532             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10533             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10534             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10535             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
10536             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10537                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10538
10539         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10540             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10541                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10542
10543         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10544                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10545                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10546                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10547                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10548                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10549                 } else {
10550                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10551                                           TG3_FLG2_HW_TSO_1_BUG;
10552                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10553                                 ASIC_REV_5750 &&
10554                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10555                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10556                 }
10557         }
10558
10559         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10560             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10561             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10562             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10563             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
10564             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10565                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10566
10567         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
10568         if (pcie_cap != 0) {
10569                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10570                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10571                         u16 lnkctl;
10572
10573                         pci_read_config_word(tp->pdev,
10574                                              pcie_cap + PCI_EXP_LNKCTL,
10575                                              &lnkctl);
10576                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
10577                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
10578                 }
10579         }
10580
10581         /* If we have an AMD 762 or VIA K8T800 chipset, write
10582          * reordering to the mailbox registers done by the host
10583          * controller can cause major troubles.  We read back from
10584          * every mailbox register write to force the writes to be
10585          * posted to the chip in order.
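         * (The read-back itself is done by the mailbox write methods
         * selected further below, e.g. tg3_write_flush_reg32().)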
10586          */
10587         if (pci_dev_present(write_reorder_chipsets) &&
10588             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10589                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10590
10591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10592             tp->pci_lat_timer < 64) {
10593                 tp->pci_lat_timer = 64;
10594
10595                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10596                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10597                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10598                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10599
10600                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10601                                        cacheline_sz_reg);
10602         }
10603
10604         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10605                               &pci_state_reg);
10606
10607         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10608                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10609
10610                 /* If this is a 5700 BX chipset, and we are in PCI-X
10611                  * mode, enable register write workaround.
10612                  *
10613                  * The workaround is to use indirect register accesses
10614                  * for all chip writes not to mailbox registers.
10615                  */
10616                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10617                         u32 pm_reg;
10618                         u16 pci_cmd;
10619
10620                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10621
10622                         /* The chip can have its power management PCI config
10623                          * space registers clobbered due to this bug.
10624                          * So explicitly force the chip into D0 here.
10625                          */
10626                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10627                                               &pm_reg);
10628                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10629                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10630                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10631                                                pm_reg);
10632
10633                         /* Also, force SERR#/PERR# in PCI command. */
10634                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10635                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10636                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10637                 }
10638         }
10639
10640         /* 5700 BX chips need to have their TX producer index mailboxes
10641          * written twice to work around a bug.
10642          */
10643         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10644                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10645
10646         /* Back-to-back register writes can cause problems on this chip;
10647          * the workaround is to read back all reg writes except those to
10648          * mailbox regs.  See tg3_write_indirect_reg32().
10649          *
10650          * PCI Express 5750_A0 rev chips need this workaround too.
10651          */
10652         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10653             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10654              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10655                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10656
10657         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10658                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10659         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10660                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10661
10662         /* Chip-specific fixup from Broadcom driver */
10663         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10664             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10665                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10666                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10667         }
10668
10669         /* Default fast path register access methods */
10670         tp->read32 = tg3_read32;
10671         tp->write32 = tg3_write32;
10672         tp->read32_mbox = tg3_read32;
10673         tp->write32_mbox = tg3_write32;
10674         tp->write32_tx_mbox = tg3_write32;
10675         tp->write32_rx_mbox = tg3_write32;
10676
10677         /* Various workaround register access methods */
10678         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10679                 tp->write32 = tg3_write_indirect_reg32;
10680         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10681                 tp->write32 = tg3_write_flush_reg32;
10682
10683         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10684             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10685                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10686                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10687                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10688         }
10689
10690         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10691                 tp->read32 = tg3_read_indirect_reg32;
10692                 tp->write32 = tg3_write_indirect_reg32;
10693                 tp->read32_mbox = tg3_read_indirect_mbox;
10694                 tp->write32_mbox = tg3_write_indirect_mbox;
10695                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10696                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10697
10698                 iounmap(tp->regs);
10699                 tp->regs = NULL;
10700
10701                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10702                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10703                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10704         }
10705         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10706                 tp->read32_mbox = tg3_read32_mbox_5906;
10707                 tp->write32_mbox = tg3_write32_mbox_5906;
10708                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
10709                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
10710         }
10711
10712         if (tp->write32 == tg3_write_indirect_reg32 ||
10713             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10714              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10715               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10716                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10717
10718         /* Get eeprom hw config before calling tg3_set_power_state().
10719          * In particular, the TG3_FLG2_IS_NIC flag must be
10720          * determined before calling tg3_set_power_state() so that
10721          * we know whether or not to switch out of Vaux power.
10722          * When the flag is set, it means that GPIO1 is used for eeprom
10723          * write protect and also implies that it is a LOM where GPIOs
10724          * are not used to switch power.
10725          */
10726         tg3_get_eeprom_hw_cfg(tp);
10727
10728         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10729          * GPIO1 driven high will bring 5700's external PHY out of reset.
10730          * It is also used as eeprom write protect on LOMs.
10731          */
10732         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10733         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10734             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10735                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10736                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10737         /* Unused GPIO3 must be driven as output on 5752 because there
10738          * are no pull-up resistors on unused GPIO pins.
10739          */
10740         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10741                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10742
10743         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10744                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10745
10746         /* Force the chip into D0. */
10747         err = tg3_set_power_state(tp, PCI_D0);
10748         if (err) {
10749                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10750                        pci_name(tp->pdev));
10751                 return err;
10752         }
10753
10754         /* 5700 B0 chips do not support checksumming correctly due
10755          * to hardware bugs.
10756          */
10757         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10758                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10759
10760         /* Derive initial jumbo mode from MTU assigned in
10761          * ether_setup() via the alloc_etherdev() call.
10762          */
10763         if (tp->dev->mtu > ETH_DATA_LEN &&
10764             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10765                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10766
10767         /* Determine WakeOnLan speed to use. */
10768         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10769             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10770             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10771             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10772                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10773         } else {
10774                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10775         }
10776
10777         /* A few boards don't want Ethernet@WireSpeed phy feature */
10778         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10779             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10780              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10781              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10782             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
10783             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10784                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10785
10786         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10787             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10788                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10789         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10790                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10791
10792         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10793                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10794                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10795                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
10796                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
10797                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10798                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
10799                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
10800                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10801                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10802         }
10803
10804         tp->coalesce_mode = 0;
10805         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10806             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10807                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10808
10809         /* Initialize MAC MI mode, polling disabled. */
10810         tw32_f(MAC_MI_MODE, tp->mi_mode);
10811         udelay(80);
10812
10813         /* Initialize data/descriptor byte/word swapping. */
10814         val = tr32(GRC_MODE);
10815         val &= GRC_MODE_HOST_STACKUP;
10816         tw32(GRC_MODE, val | tp->grc_mode);
10817
10818         tg3_switch_clocks(tp);
10819
10820         /* Clear this out for sanity. */
10821         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10822
10823         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10824                               &pci_state_reg);
10825         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10826             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10827                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10828
10829                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10830                     chiprevid == CHIPREV_ID_5701_B0 ||
10831                     chiprevid == CHIPREV_ID_5701_B2 ||
10832                     chiprevid == CHIPREV_ID_5701_B5) {
10833                         void __iomem *sram_base;
10834
10835                         /* Write some dummy words into the SRAM status block
10836                          * area and see if they read back correctly.  If the return
10837                          * value is bad, force-enable the PCIX workaround.
10838                          */
10839                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10840
10841                         writel(0x00000000, sram_base);
10842                         writel(0x00000000, sram_base + 4);
10843                         writel(0xffffffff, sram_base + 4);
10844                         if (readl(sram_base) != 0x00000000)
10845                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10846                 }
10847         }
10848
10849         udelay(50);
10850         tg3_nvram_init(tp);
10851
10852         grc_misc_cfg = tr32(GRC_MISC_CFG);
10853         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10854
10855         /* Broadcom's driver says that CIOBE multisplit has a bug */
10856 #if 0
10857         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10858             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10859                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10860                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10861         }
10862 #endif
10863         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10864             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10865              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10866                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10867
10868         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10869             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10870                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10871         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10872                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10873                                       HOSTCC_MODE_CLRTICK_TXBD);
10874
10875                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10876                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10877                                        tp->misc_host_ctrl);
10878         }
10879
10880         /* these are limited to 10/100 only */
10881         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10882              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10883             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10884              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10885              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10886               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10887               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10888             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10889              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10890               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
10891               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
10892             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10893                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10894
10895         err = tg3_phy_probe(tp);
10896         if (err) {
10897                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10898                        pci_name(tp->pdev), err);
10899                 /* ... but do not return immediately ... */
10900         }
10901
10902         tg3_read_partno(tp);
10903         tg3_read_fw_ver(tp);
10904
10905         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10906                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10907         } else {
10908                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10909                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10910                 else
10911                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10912         }
10913
10914         /* 5700 {AX,BX} chips have a broken status block link
10915          * change bit implementation, so we must use the
10916          * status register in those cases.
10917          */
10918         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10919                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10920         else
10921                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10922
10923         /* The led_ctrl is set during tg3_phy_probe; here we might
10924          * have to force the link status polling mechanism based
10925          * upon subsystem IDs.
10926          */
10927         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10928             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10929                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10930                                   TG3_FLAG_USE_LINKCHG_REG);
10931         }
10932
10933         /* For all SERDES we poll the MAC status register. */
10934         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10935                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10936         else
10937                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10938
10939         /* All chips before 5787 can get confused if TX buffers
10940          * straddle the 4GB address boundary in some cases.
10941          */
10942         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10943             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10944             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10945                 tp->dev->hard_start_xmit = tg3_start_xmit;
10946         else
10947                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10948
10949         tp->rx_offset = 2;
10950         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10951             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10952                 tp->rx_offset = 0;
10953
10954         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10955
10956         /* Increment the rx prod index on the rx std ring by at most
10957          * 8 for these chips to work around hw errata.
10958          */
10959         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10960             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10961             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10962                 tp->rx_std_max_post = 8;
10963
10964         /* By default, disable wake-on-lan.  User can change this
10965          * using ETHTOOL_SWOL.
10966          */
10967         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10968
10969         return err;
10970 }
10971
10972 #ifdef CONFIG_SPARC64
10973 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10974 {
10975         struct net_device *dev = tp->dev;
10976         struct pci_dev *pdev = tp->pdev;
10977         struct pcidev_cookie *pcp = pdev->sysdata;
10978
10979         if (pcp != NULL) {
10980                 unsigned char *addr;
10981                 int len;
10982
10983                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10984                                         &len);
10985                 if (addr && len == 6) {
10986                         memcpy(dev->dev_addr, addr, 6);
10987                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10988                         return 0;
10989                 }
10990         }
10991         return -ENODEV;
10992 }
10993
10994 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10995 {
10996         struct net_device *dev = tp->dev;
10997
10998         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10999         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11000         return 0;
11001 }
11002 #endif
11003
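/* Determine the device MAC address: try the SPARC PROM property first,
 * then the MAC address mailbox in NIC SRAM, then NVRAM, and finally the
 * MAC_ADDR_0 registers.
 */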
11004 static int __devinit tg3_get_device_address(struct tg3 *tp)
11005 {
11006         struct net_device *dev = tp->dev;
11007         u32 hi, lo, mac_offset;
11008         int addr_ok = 0;
11009
11010 #ifdef CONFIG_SPARC64
11011         if (!tg3_get_macaddr_sparc(tp))
11012                 return 0;
11013 #endif
11014
11015         mac_offset = 0x7c;
11016         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11017             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11018                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11019                         mac_offset = 0xcc;
11020                 if (tg3_nvram_lock(tp))
11021                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11022                 else
11023                         tg3_nvram_unlock(tp);
11024         }
11025         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11026                 mac_offset = 0x10;
11027
11028         /* First try to get it from MAC address mailbox. */
11029         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11030         if ((hi >> 16) == 0x484b) {
11031                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11032                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11033
11034                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11035                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11036                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11037                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11038                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11039
11040                 /* Some old bootcode may report a 0 MAC address in SRAM */
11041                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11042         }
11043         if (!addr_ok) {
11044                 /* Next, try NVRAM. */
11045                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11046                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11047                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11048                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11049                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11050                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11051                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11052                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11053                 }
11054                 /* Finally just fetch it out of the MAC control regs. */
11055                 else {
11056                         hi = tr32(MAC_ADDR_0_HIGH);
11057                         lo = tr32(MAC_ADDR_0_LOW);
11058
11059                         dev->dev_addr[5] = lo & 0xff;
11060                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11061                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11062                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11063                         dev->dev_addr[1] = hi & 0xff;
11064                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11065                 }
11066         }
11067
11068         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11069 #ifdef CONFIG_SPARC64
11070                 if (!tg3_get_default_macaddr_sparc(tp))
11071                         return 0;
11072 #endif
11073                 return -EINVAL;
11074         }
11075         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11076         return 0;
11077 }
11078
11079 #define BOUNDARY_SINGLE_CACHELINE       1
11080 #define BOUNDARY_MULTI_CACHELINE        2
11081
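/* Fold the host cache line size and bus type into the DMA read/write
 * boundary bits of the TG3PCI_DMA_RW_CTRL value.  The value is left
 * unchanged except on 5700/5701 and PCI Express devices.
 */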
11082 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11083 {
11084         int cacheline_size;
11085         u8 byte;
11086         int goal;
11087
11088         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11089         if (byte == 0)
11090                 cacheline_size = 1024;
11091         else
11092                 cacheline_size = (int) byte * 4;
11093
11094         /* On 5703 and later chips, the boundary bits have no
11095          * effect.
11096          */
11097         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11098             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11099             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11100                 goto out;
11101
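        /* Pick the preferred boundary behaviour for the host architecture:
         * multi-cacheline bursts on PPC64/IA64/PARISC, single-cacheline on
         * SPARC64/Alpha, and leave the chip default everywhere else.
         */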
11102 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11103         goal = BOUNDARY_MULTI_CACHELINE;
11104 #else
11105 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11106         goal = BOUNDARY_SINGLE_CACHELINE;
11107 #else
11108         goal = 0;
11109 #endif
11110 #endif
11111
11112         if (!goal)
11113                 goto out;
11114
11115         /* PCI controllers on most RISC systems tend to disconnect
11116          * when a device tries to burst across a cache-line boundary.
11117          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11118          *
11119          * Unfortunately, for PCI-E there are only limited
11120          * write-side controls for this, and thus for reads
11121          * we will still get the disconnects.  We'll also waste
11122          * these PCI cycles for both read and write for chips
11123          * other than 5700 and 5701, which do not implement the
11124          * boundary bits.
11125          */
11126         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11127             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11128                 switch (cacheline_size) {
11129                 case 16:
11130                 case 32:
11131                 case 64:
11132                 case 128:
11133                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11134                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11135                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11136                         } else {
11137                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11138                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11139                         }
11140                         break;
11141
11142                 case 256:
11143                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11144                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11145                         break;
11146
11147                 default:
11148                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11149                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11150                         break;
11151                 }
11152         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11153                 switch (cacheline_size) {
11154                 case 16:
11155                 case 32:
11156                 case 64:
11157                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11158                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11159                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11160                                 break;
11161                         }
11162                         /* fallthrough */
11163                 case 128:
11164                 default:
11165                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11166                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11167                         break;
11168                 }
11169         } else {
11170                 switch (cacheline_size) {
11171                 case 16:
11172                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11173                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11174                                         DMA_RWCTRL_WRITE_BNDRY_16);
11175                                 break;
11176                         }
11177                         /* fallthrough */
11178                 case 32:
11179                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11180                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11181                                         DMA_RWCTRL_WRITE_BNDRY_32);
11182                                 break;
11183                         }
11184                         /* fallthrough */
11185                 case 64:
11186                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11187                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11188                                         DMA_RWCTRL_WRITE_BNDRY_64);
11189                                 break;
11190                         }
11191                         /* fallthrough */
11192                 case 128:
11193                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11194                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11195                                         DMA_RWCTRL_WRITE_BNDRY_128);
11196                                 break;
11197                         }
11198                         /* fallthrough */
11199                 case 256:
11200                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11201                                 DMA_RWCTRL_WRITE_BNDRY_256);
11202                         break;
11203                 case 512:
11204                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11205                                 DMA_RWCTRL_WRITE_BNDRY_512);
11206                         break;
11207                 case 1024:
11208                 default:
11209                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11210                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11211                         break;
11212                 }
11213         }
11214
11215 out:
11216         return val;
11217 }
11218
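/* Hand-feed a single DMA descriptor to the read or write DMA engine to move
 * 'size' bytes between the host buffer at buf_dma and NIC SRAM, then poll
 * the completion FIFO.  Returns 0 on completion, -ENODEV on timeout.
 */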
11219 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
11220 {
11221         struct tg3_internal_buffer_desc test_desc;
11222         u32 sram_dma_descs;
11223         int i, ret;
11224
11225         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
11226
11227         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
11228         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
11229         tw32(RDMAC_STATUS, 0);
11230         tw32(WDMAC_STATUS, 0);
11231
11232         tw32(BUFMGR_MODE, 0);
11233         tw32(FTQ_RESET, 0);
11234
11235         test_desc.addr_hi = ((u64) buf_dma) >> 32;
11236         test_desc.addr_lo = buf_dma & 0xffffffff;
11237         test_desc.nic_mbuf = 0x00002100;
11238         test_desc.len = size;
11239
11240         /*
11241          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
11242          * the *second* time the tg3 driver was loaded after an
11243          * initial scan.
11244          *
11245          * Broadcom tells me:
11246          *   ...the DMA engine is connected to the GRC block and a DMA
11247          *   reset may affect the GRC block in some unpredictable way...
11248          *   The behavior of resets to individual blocks has not been tested.
11249          *
11250          * Broadcom noted the GRC reset will also reset all sub-components.
11251          */
11252         if (to_device) {
11253                 test_desc.cqid_sqid = (13 << 8) | 2;
11254
11255                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
11256                 udelay(40);
11257         } else {
11258                 test_desc.cqid_sqid = (16 << 8) | 7;
11259
11260                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
11261                 udelay(40);
11262         }
11263         test_desc.flags = 0x00000005;
11264
11265         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
11266                 u32 val;
11267
11268                 val = *(((u32 *)&test_desc) + i);
11269                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
11270                                        sram_dma_descs + (i * sizeof(u32)));
11271                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
11272         }
11273         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
11274
11275         if (to_device) {
11276                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11277         } else {
11278                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11279         }
11280
11281         ret = -ENODEV;
11282         for (i = 0; i < 40; i++) {
11283                 u32 val;
11284
11285                 if (to_device)
11286                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11287                 else
11288                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11289                 if ((val & 0xffff) == sram_dma_descs) {
11290                         ret = 0;
11291                         break;
11292                 }
11293
11294                 udelay(100);
11295         }
11296
11297         return ret;
11298 }
11299
11300 #define TEST_BUFFER_SIZE        0x2000
11301
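/* Choose a working dma_rwctrl value.  On 5700/5701 a test pattern is DMA'd
 * to the chip and back with the maximum write burst; if corruption is seen,
 * the write boundary is tightened to 16 bytes.
 */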
11302 static int __devinit tg3_test_dma(struct tg3 *tp)
11303 {
11304         dma_addr_t buf_dma;
11305         u32 *buf, saved_dma_rwctrl;
11306         int ret;
11307
11308         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11309         if (!buf) {
11310                 ret = -ENOMEM;
11311                 goto out_nofree;
11312         }
11313
11314         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11315                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11316
11317         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11318
11319         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11320                 /* DMA read watermark not used on PCIE */
11321                 tp->dma_rwctrl |= 0x00180000;
11322         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11323                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11324                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11325                         tp->dma_rwctrl |= 0x003f0000;
11326                 else
11327                         tp->dma_rwctrl |= 0x003f000f;
11328         } else {
11329                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11330                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11331                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11332                         u32 read_water = 0x7;
11333
11334                         /* If the 5704 is behind the EPB bridge, we can
11335                          * do the less restrictive ONE_DMA workaround for
11336                          * better performance.
11337                          */
11338                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11339                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11340                                 tp->dma_rwctrl |= 0x8000;
11341                         else if (ccval == 0x6 || ccval == 0x7)
11342                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11343
11344                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
11345                                 read_water = 4;
11346                         /* Set bit 23 to enable PCIX hw bug fix */
11347                         tp->dma_rwctrl |=
11348                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
11349                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
11350                                 (1 << 23);
11351                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11352                         /* 5780 always in PCIX mode */
11353                         tp->dma_rwctrl |= 0x00144000;
11354                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11355                         /* 5714 always in PCIX mode */
11356                         tp->dma_rwctrl |= 0x00148000;
11357                 } else {
11358                         tp->dma_rwctrl |= 0x001b000f;
11359                 }
11360         }
11361
11362         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11363             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11364                 tp->dma_rwctrl &= 0xfffffff0;
11365
11366         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11367             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11368                 /* Remove this if it causes problems for some boards. */
11369                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11370
11371                 /* On 5700/5701 chips, we need to set this bit.
11372                  * Otherwise the chip will issue cacheline transactions
11373                  * to streamable DMA memory without all of the byte
11374                  * enables turned on.  This is an error on several
11375                  * RISC PCI controllers, in particular sparc64.
11376                  *
11377                  * On 5703/5704 chips, this bit has been reassigned
11378                  * a different meaning.  In particular, it is used
11379                  * on those chips to enable a PCI-X workaround.
11380                  */
11381                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11382         }
11383
11384         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11385
11386 #if 0
11387         /* Unneeded, already done by tg3_get_invariants.  */
11388         tg3_switch_clocks(tp);
11389 #endif
11390
11391         ret = 0;
11392         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11393             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11394                 goto out;
11395
11396         /* It is best to perform the DMA test with the maximum write burst size
11397          * to expose the 5700/5701 write DMA bug.
11398          */
11399         saved_dma_rwctrl = tp->dma_rwctrl;
11400         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11401         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11402
11403         while (1) {
11404                 u32 *p = buf, i;
11405
11406                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11407                         p[i] = i;
11408
11409                 /* Send the buffer to the chip. */
11410                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11411                 if (ret) {
11412                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11413                         break;
11414                 }
11415
11416 #if 0
11417                 /* validate data reached card RAM correctly. */
11418                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11419                         u32 val;
11420                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11421                         if (le32_to_cpu(val) != p[i]) {
11422                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
11423                                 /* ret = -ENODEV here? */
11424                         }
11425                         p[i] = 0;
11426                 }
11427 #endif
11428                 /* Now read it back. */
11429                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11430                 if (ret) {
11431                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
11432
11433                         break;
11434                 }
11435
11436                 /* Verify it. */
11437                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11438                         if (p[i] == i)
11439                                 continue;
11440
11441                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11442                             DMA_RWCTRL_WRITE_BNDRY_16) {
11443                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11444                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11445                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11446                                 break;
11447                         } else {
11448                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11449                                 ret = -ENODEV;
11450                                 goto out;
11451                         }
11452                 }
11453
11454                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11455                         /* Success. */
11456                         ret = 0;
11457                         break;
11458                 }
11459         }
11460         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11461             DMA_RWCTRL_WRITE_BNDRY_16) {
11462                 static struct pci_device_id dma_wait_state_chipsets[] = {
11463                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11464                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11465                         { },
11466                 };
11467
11468                 /* DMA test passed without adjusting the DMA boundary;
11469                  * now look for chipsets that are known to expose the
11470                  * DMA bug without failing the test.
11471                  */
11472                 if (pci_dev_present(dma_wait_state_chipsets)) {
11473                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11474                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11475                 }
11476                 else
11477                         /* Safe to use the calculated DMA boundary. */
11478                         tp->dma_rwctrl = saved_dma_rwctrl;
11479
11480                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11481         }
11482
11483 out:
11484         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11485 out_nofree:
11486         return ret;
11487 }
11488
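/* Default link configuration: autonegotiation enabled with all 10/100/1000
 * modes advertised.
 */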
11489 static void __devinit tg3_init_link_config(struct tg3 *tp)
11490 {
11491         tp->link_config.advertising =
11492                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11493                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11494                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11495                  ADVERTISED_Autoneg | ADVERTISED_MII);
11496         tp->link_config.speed = SPEED_INVALID;
11497         tp->link_config.duplex = DUPLEX_INVALID;
11498         tp->link_config.autoneg = AUTONEG_ENABLE;
11499         tp->link_config.active_speed = SPEED_INVALID;
11500         tp->link_config.active_duplex = DUPLEX_INVALID;
11501         tp->link_config.phy_is_low_power = 0;
11502         tp->link_config.orig_speed = SPEED_INVALID;
11503         tp->link_config.orig_duplex = DUPLEX_INVALID;
11504         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11505 }
11506
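/* Select the buffer manager watermark defaults for the chip family; 5705+
 * and 5906 chips and jumbo frames each use their own set of constants.
 */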
11507 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11508 {
11509         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11510                 tp->bufmgr_config.mbuf_read_dma_low_water =
11511                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11512                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11513                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11514                 tp->bufmgr_config.mbuf_high_water =
11515                         DEFAULT_MB_HIGH_WATER_5705;
11516                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11517                         tp->bufmgr_config.mbuf_mac_rx_low_water =
11518                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
11519                         tp->bufmgr_config.mbuf_high_water =
11520                                 DEFAULT_MB_HIGH_WATER_5906;
11521                 }
11522
11523                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11524                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11525                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11526                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11527                 tp->bufmgr_config.mbuf_high_water_jumbo =
11528                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11529         } else {
11530                 tp->bufmgr_config.mbuf_read_dma_low_water =
11531                         DEFAULT_MB_RDMA_LOW_WATER;
11532                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11533                         DEFAULT_MB_MACRX_LOW_WATER;
11534                 tp->bufmgr_config.mbuf_high_water =
11535                         DEFAULT_MB_HIGH_WATER;
11536
11537                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11538                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11539                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11540                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11541                 tp->bufmgr_config.mbuf_high_water_jumbo =
11542                         DEFAULT_MB_HIGH_WATER_JUMBO;
11543         }
11544
11545         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11546         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11547 }
11548
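/* Return a printable name for the PHY reported in tp->phy_id. */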
11549 static char * __devinit tg3_phy_string(struct tg3 *tp)
11550 {
11551         switch (tp->phy_id & PHY_ID_MASK) {
11552         case PHY_ID_BCM5400:    return "5400";
11553         case PHY_ID_BCM5401:    return "5401";
11554         case PHY_ID_BCM5411:    return "5411";
11555         case PHY_ID_BCM5701:    return "5701";
11556         case PHY_ID_BCM5703:    return "5703";
11557         case PHY_ID_BCM5704:    return "5704";
11558         case PHY_ID_BCM5705:    return "5705";
11559         case PHY_ID_BCM5750:    return "5750";
11560         case PHY_ID_BCM5752:    return "5752";
11561         case PHY_ID_BCM5714:    return "5714";
11562         case PHY_ID_BCM5780:    return "5780";
11563         case PHY_ID_BCM5755:    return "5755";
11564         case PHY_ID_BCM5787:    return "5787";
11565         case PHY_ID_BCM5756:    return "5722/5756";
11566         case PHY_ID_BCM5906:    return "5906";
11567         case PHY_ID_BCM8002:    return "8002/serdes";
11568         case 0:                 return "serdes";
11569         default:                return "unknown";
11570         }
11571 }
11572
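/* Format a printable description of the bus type, clock and width into the
 * caller-supplied buffer.
 */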
11573 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11574 {
11575         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11576                 strcpy(str, "PCI Express");
11577                 return str;
11578         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11579                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11580
11581                 strcpy(str, "PCIX:");
11582
11583                 if ((clock_ctrl == 7) ||
11584                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11585                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11586                         strcat(str, "133MHz");
11587                 else if (clock_ctrl == 0)
11588                         strcat(str, "33MHz");
11589                 else if (clock_ctrl == 2)
11590                         strcat(str, "50MHz");
11591                 else if (clock_ctrl == 4)
11592                         strcat(str, "66MHz");
11593                 else if (clock_ctrl == 6)
11594                         strcat(str, "100MHz");
11595         } else {
11596                 strcpy(str, "PCI:");
11597                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11598                         strcat(str, "66MHz");
11599                 else
11600                         strcat(str, "33MHz");
11601         }
11602         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11603                 strcat(str, ":32-bit");
11604         else
11605                 strcat(str, ":64-bit");
11606         return str;
11607 }
11608
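/* Find the other PCI function of a dual-port chip sharing this
 * device's slot.
 */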
11609 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11610 {
11611         struct pci_dev *peer;
11612         unsigned int func, devnr = tp->pdev->devfn & ~7;
11613
11614         for (func = 0; func < 8; func++) {
11615                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11616                 if (peer && peer != tp->pdev)
11617                         break;
11618                 pci_dev_put(peer);
11619         }
11620         /* 5704 can be configured in single-port mode; set peer to
11621          * tp->pdev in that case.
11622          */
11623         if (!peer) {
11624                 peer = tp->pdev;
11625                 return peer;
11626         }
11627
11628         /*
11629          * We don't need to keep the refcount elevated; there's no way
11630          * to remove one half of this device without removing the other.
11631          */
11632         pci_dev_put(peer);
11633
11634         return peer;
11635 }
11636
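/* Fill in the default interrupt coalescing parameters reported via
 * ethtool; the per-IRQ and statistics tick values are cleared on
 * 5705 and later chips.
 */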
11637 static void __devinit tg3_init_coal(struct tg3 *tp)
11638 {
11639         struct ethtool_coalesce *ec = &tp->coal;
11640
11641         memset(ec, 0, sizeof(*ec));
11642         ec->cmd = ETHTOOL_GCOALESCE;
11643         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11644         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11645         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11646         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11647         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11648         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11649         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11650         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11651         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11652
11653         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11654                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11655                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11656                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11657                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11658                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11659         }
11660
11661         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11662                 ec->rx_coalesce_usecs_irq = 0;
11663                 ec->tx_coalesce_usecs_irq = 0;
11664                 ec->stats_block_coalesce_usecs = 0;
11665         }
11666 }
11667
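/* PCI probe entry point: enable and map the device, discover chip
 * capabilities, configure DMA masks and netdev operations, and register
 * the network interface.
 */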
11668 static int __devinit tg3_init_one(struct pci_dev *pdev,
11669                                   const struct pci_device_id *ent)
11670 {
11671         static int tg3_version_printed;
11672         unsigned long tg3reg_base, tg3reg_len;
11673         struct net_device *dev;
11674         struct tg3 *tp;
11675         int i, err, pm_cap;
11676         char str[40];
11677         u64 dma_mask, persist_dma_mask;
11678
11679         if (tg3_version_printed++ == 0)
11680                 printk(KERN_INFO "%s", version);
11681
11682         err = pci_enable_device(pdev);
11683         if (err) {
11684                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11685                        "aborting.\n");
11686                 return err;
11687         }
11688
11689         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11690                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11691                        "base address, aborting.\n");
11692                 err = -ENODEV;
11693                 goto err_out_disable_pdev;
11694         }
11695
11696         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11697         if (err) {
11698                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11699                        "aborting.\n");
11700                 goto err_out_disable_pdev;
11701         }
11702
11703         pci_set_master(pdev);
11704
11705         /* Find power-management capability. */
11706         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11707         if (pm_cap == 0) {
11708                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11709                        "aborting.\n");
11710                 err = -EIO;
11711                 goto err_out_free_res;
11712         }
11713
11714         tg3reg_base = pci_resource_start(pdev, 0);
11715         tg3reg_len = pci_resource_len(pdev, 0);
11716
11717         dev = alloc_etherdev(sizeof(*tp));
11718         if (!dev) {
11719                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11720                 err = -ENOMEM;
11721                 goto err_out_free_res;
11722         }
11723
11724         SET_MODULE_OWNER(dev);
11725         SET_NETDEV_DEV(dev, &pdev->dev);
11726
11727 #if TG3_VLAN_TAG_USED
11728         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11729         dev->vlan_rx_register = tg3_vlan_rx_register;
11730         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11731 #endif
11732
11733         tp = netdev_priv(dev);
11734         tp->pdev = pdev;
11735         tp->dev = dev;
11736         tp->pm_cap = pm_cap;
11737         tp->mac_mode = TG3_DEF_MAC_MODE;
11738         tp->rx_mode = TG3_DEF_RX_MODE;
11739         tp->tx_mode = TG3_DEF_TX_MODE;
11740         tp->mi_mode = MAC_MI_MODE_BASE;
11741         if (tg3_debug > 0)
11742                 tp->msg_enable = tg3_debug;
11743         else
11744                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11745
11746         /* The word/byte swap controls here control register access byte
11747          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11748          * setting below.
11749          */
11750         tp->misc_host_ctrl =
11751                 MISC_HOST_CTRL_MASK_PCI_INT |
11752                 MISC_HOST_CTRL_WORD_SWAP |
11753                 MISC_HOST_CTRL_INDIR_ACCESS |
11754                 MISC_HOST_CTRL_PCISTATE_RW;
11755
11756         /* The NONFRM (non-frame) byte/word swap controls take effect
11757          * on descriptor entries, anything which isn't packet data.
11758          *
11759          * The StrongARM chips on the board (one for tx, one for rx)
11760          * are running in big-endian mode.
11761          */
11762         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11763                         GRC_MODE_WSWAP_NONFRM_DATA);
11764 #ifdef __BIG_ENDIAN
11765         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11766 #endif
11767         spin_lock_init(&tp->lock);
11768         spin_lock_init(&tp->indirect_lock);
11769         INIT_WORK(&tp->reset_task, tg3_reset_task);
11770
11771         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11772         if (!tp->regs) {
11773                 printk(KERN_ERR PFX "Cannot map device registers, "
11774                        "aborting.\n");
11775                 err = -ENOMEM;
11776                 goto err_out_free_dev;
11777         }
11778
11779         tg3_init_link_config(tp);
11780
11781         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11782         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11783         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11784
11785         dev->open = tg3_open;
11786         dev->stop = tg3_close;
11787         dev->get_stats = tg3_get_stats;
11788         dev->set_multicast_list = tg3_set_rx_mode;
11789         dev->set_mac_address = tg3_set_mac_addr;
11790         dev->do_ioctl = tg3_ioctl;
11791         dev->tx_timeout = tg3_tx_timeout;
11792         dev->poll = tg3_poll;
11793         dev->ethtool_ops = &tg3_ethtool_ops;
11794         dev->weight = 64;
11795         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11796         dev->change_mtu = tg3_change_mtu;
11797         dev->irq = pdev->irq;
11798 #ifdef CONFIG_NET_POLL_CONTROLLER
11799         dev->poll_controller = tg3_poll_controller;
11800 #endif
11801
11802         err = tg3_get_invariants(tp);
11803         if (err) {
11804                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11805                        "aborting.\n");
11806                 goto err_out_iounmap;
11807         }
11808
11809         /* The EPB bridge inside 5714, 5715, and 5780 and any
11810          * device behind the EPB cannot support DMA addresses > 40-bit.
11811          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11812          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11813          * do DMA address check in tg3_start_xmit().
11814          */
11815         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11816                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11817         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11818                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11819 #ifdef CONFIG_HIGHMEM
11820                 dma_mask = DMA_64BIT_MASK;
11821 #endif
11822         } else
11823                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11824
11825         /* Configure DMA attributes. */
11826         if (dma_mask > DMA_32BIT_MASK) {
11827                 err = pci_set_dma_mask(pdev, dma_mask);
11828                 if (!err) {
11829                         dev->features |= NETIF_F_HIGHDMA;
11830                         err = pci_set_consistent_dma_mask(pdev,
11831                                                           persist_dma_mask);
11832                         if (err < 0) {
11833                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11834                                        "DMA for consistent allocations\n");
11835                                 goto err_out_iounmap;
11836                         }
11837                 }
11838         }
11839         if (err || dma_mask == DMA_32BIT_MASK) {
11840                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11841                 if (err) {
11842                         printk(KERN_ERR PFX "No usable DMA configuration, "
11843                                "aborting.\n");
11844                         goto err_out_iounmap;
11845                 }
11846         }
11847
11848         tg3_init_bufmgr_config(tp);
11849
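        /* Decide whether the chip can do TSO at all: hardware-TSO parts
         * always can, while firmware TSO is ruled out on 5700, 5701,
         * 5705 A0 and 5906, or whenever ASF is enabled.
         */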
11850         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11851                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11852         }
11853         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11854             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11855             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11856             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11857             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11858                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11859         } else {
11860                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11861         }
11862
11863         /* TSO is on by default on chips that support hardware TSO.
11864          * Firmware TSO on older chips gives lower performance, so it
11865          * is off by default, but can be enabled using ethtool.
11866          */
11867         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11868                 dev->features |= NETIF_F_TSO;
11869                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
11870                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
11871                         dev->features |= NETIF_F_TSO6;
11872         }
11873
11874
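        /* 5705 A1 parts without TSO on a bus that is not running at high
         * speed can only handle a small (64-entry) pending RX queue.
         */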
11875         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11876             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11877             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11878                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11879                 tp->rx_pending = 63;
11880         }
11881
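        /* 5704 and 5714 class chips can have a second network function;
         * look up and record the peer device.
         */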
11882         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11883             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11884                 tp->pdev_peer = tg3_find_peer(tp);
11885
11886         err = tg3_get_device_address(tp);
11887         if (err) {
11888                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11889                        "aborting.\n");
11890                 goto err_out_iounmap;
11891         }
11892
11893         /*
11894          * Reset the chip in case the UNDI or EFI driver did not shut it
11895          * down.  Otherwise the DMA self test will enable WDMAC and we'll
11896          * see (spurious) pending DMA on the PCI bus at that point.
11897          */
11898         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11899             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11900                 pci_save_state(tp->pdev);
11901                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11902                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11903         }
11904
11905         err = tg3_test_dma(tp);
11906         if (err) {
11907                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11908                 goto err_out_iounmap;
11909         }
11910
11911         /* Tigon3 can do ipv4 only... and some chips have buggy
11912          * checksumming.
11913          */
11914         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11915                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11916                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11917                         dev->features |= NETIF_F_HW_CSUM;
11918                 else
11919                         dev->features |= NETIF_F_IP_CSUM;
11920                 dev->features |= NETIF_F_SG;
11921                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11922         } else
11923                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11924
11925         /* flow control autonegotiation is default behavior */
11926         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11927
11928         tg3_init_coal(tp);
11929
11930         /* Now that we have fully setup the chip, save away a snapshot
11931          * of the PCI config space.  We need to restore this after
11932          * GRC_MISC_CFG core clock resets and some resume events.
11933          */
11934         pci_save_state(tp->pdev);
11935
11936         pci_set_drvdata(pdev, dev);
11937
11938         err = register_netdev(dev);
11939         if (err) {
11940                 printk(KERN_ERR PFX "Cannot register net device, "
11941                        "aborting.\n");
11942                 goto err_out_iounmap;
11943         }
11944
11945         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
11946                dev->name,
11947                tp->board_part_number,
11948                tp->pci_chip_rev_id,
11949                tg3_phy_string(tp),
11950                tg3_bus_string(tp, str),
11951                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
11952                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
11953                  "10/100/1000Base-T")));
11954
11955         for (i = 0; i < 6; i++)
11956                 printk("%2.2x%c", dev->dev_addr[i],
11957                        i == 5 ? '\n' : ':');
11958
11959         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11960                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11961                "TSOcap[%d]\n",
11962                dev->name,
11963                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11964                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11965                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11966                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11967                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11968                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11969                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11970         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11971                dev->name, tp->dma_rwctrl,
11972                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11973                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11974
11975         return 0;
11976
11977 err_out_iounmap:
11978         if (tp->regs) {
11979                 iounmap(tp->regs);
11980                 tp->regs = NULL;
11981         }
11982
11983 err_out_free_dev:
11984         free_netdev(dev);
11985
11986 err_out_free_res:
11987         pci_release_regions(pdev);
11988
11989 err_out_disable_pdev:
11990         pci_disable_device(pdev);
11991         pci_set_drvdata(pdev, NULL);
11992         return err;
11993 }
11994
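/* PCI remove entry point: unregister the netdev and release everything
 * acquired in tg3_init_one().
 */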
11995 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11996 {
11997         struct net_device *dev = pci_get_drvdata(pdev);
11998
11999         if (dev) {
12000                 struct tg3 *tp = netdev_priv(dev);
12001
12002                 flush_scheduled_work();
12003                 unregister_netdev(dev);
12004                 if (tp->regs) {
12005                         iounmap(tp->regs);
12006                         tp->regs = NULL;
12007                 }
12008                 free_netdev(dev);
12009                 pci_release_regions(pdev);
12010                 pci_disable_device(pdev);
12011                 pci_set_drvdata(pdev, NULL);
12012         }
12013 }
12014
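/* Power-management suspend: quiesce the interface, halt the chip and put
 * it into the requested low-power state.  On failure the device is
 * restarted so it keeps working.
 */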
12015 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
12016 {
12017         struct net_device *dev = pci_get_drvdata(pdev);
12018         struct tg3 *tp = netdev_priv(dev);
12019         int err;
12020
12021         if (!netif_running(dev))
12022                 return 0;
12023
12024         flush_scheduled_work();
12025         tg3_netif_stop(tp);
12026
12027         del_timer_sync(&tp->timer);
12028
12029         tg3_full_lock(tp, 1);
12030         tg3_disable_ints(tp);
12031         tg3_full_unlock(tp);
12032
12033         netif_device_detach(dev);
12034
12035         tg3_full_lock(tp, 0);
12036         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12037         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
12038         tg3_full_unlock(tp);
12039
12040         /* Save MSI address and data for resume.  */
12041         pci_save_state(pdev);
12042
12043         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
12044         if (err) {
12045                 tg3_full_lock(tp, 0);
12046
12047                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12048                 if (tg3_restart_hw(tp, 1))
12049                         goto out;
12050
12051                 tp->timer.expires = jiffies + tp->timer_offset;
12052                 add_timer(&tp->timer);
12053
12054                 netif_device_attach(dev);
12055                 tg3_netif_start(tp);
12056
12057 out:
12058                 tg3_full_unlock(tp);
12059         }
12060
12061         return err;
12062 }
12063
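/* Power-management resume: restore PCI config space, return the chip to
 * D0 and restart the hardware and periodic timer.
 */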
12064 static int tg3_resume(struct pci_dev *pdev)
12065 {
12066         struct net_device *dev = pci_get_drvdata(pdev);
12067         struct tg3 *tp = netdev_priv(dev);
12068         int err;
12069
12070         if (!netif_running(dev))
12071                 return 0;
12072
12073         pci_restore_state(tp->pdev);
12074
12075         err = tg3_set_power_state(tp, PCI_D0);
12076         if (err)
12077                 return err;
12078
12079         netif_device_attach(dev);
12080
12081         tg3_full_lock(tp, 0);
12082
12083         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12084         err = tg3_restart_hw(tp, 1);
12085         if (err)
12086                 goto out;
12087
12088         tp->timer.expires = jiffies + tp->timer_offset;
12089         add_timer(&tp->timer);
12090
12091         tg3_netif_start(tp);
12092
12093 out:
12094         tg3_full_unlock(tp);
12095
12096         return err;
12097 }
12098
12099 static struct pci_driver tg3_driver = {
12100         .name           = DRV_MODULE_NAME,
12101         .id_table       = tg3_pci_tbl,
12102         .probe          = tg3_init_one,
12103         .remove         = __devexit_p(tg3_remove_one),
12104         .suspend        = tg3_suspend,
12105         .resume         = tg3_resume
12106 };
12107
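/* Module entry and exit points simply register and unregister the PCI
 * driver; all per-device work happens in tg3_init_one()/tg3_remove_one().
 */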
12108 static int __init tg3_init(void)
12109 {
12110         return pci_register_driver(&tg3_driver);
12111 }
12112
12113 static void __exit tg3_cleanup(void)
12114 {
12115         pci_unregister_driver(&tg3_driver);
12116 }
12117
12118 module_init(tg3_init);
12119 module_exit(tg3_cleanup);