/* Extracted from the net-next-2.6 tree: drivers/net/tg3.c
 * (snapshot commit: "[TG3]: ASIC decoding and basic CPMU support.")
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.81"
68 #define DRV_MODULE_RELDATE      "September 5, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
135 static char version[] __devinitdata =
136         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI vendor/device IDs this driver claims.  The empty initializer at
 * the end is the required sentinel for MODULE_DEVICE_TABLE matching.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};
210
211 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
212
/* Statistic names exported for ETHTOOL_GSTRINGS.  TG3_NUM_STATS is
 * derived from sizeof(struct tg3_ethtool_stats), so the number of
 * entries here must stay in sync with that structure (presumably the
 * ordering must match as well — verify against the stats copy code).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
293
/* Self-test names exported for ETHTOOL_GSTRINGS; one entry per
 * TG3_NUM_TEST.  "(online)" tests run without disrupting traffic;
 * "(offline)" tests require taking the device down.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
304
305 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
306 {
307         writel(val, tp->regs + off);
308 }
309
310 static u32 tg3_read32(struct tg3 *tp, u32 off)
311 {
312         return (readl(tp->regs + off));
313 }
314
/* Write a device register indirectly through PCI config space: the
 * register offset goes into TG3PCI_REG_BASE_ADDR and the value into
 * TG3PCI_REG_DATA.  indirect_lock makes the two config writes atomic
 * with respect to other indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
324
325 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
326 {
327         writel(val, tp->regs + off);
328         readl(tp->regs + off);
329 }
330
/* Read a device register indirectly through PCI config space; the
 * counterpart of tg3_write_indirect_reg32().  indirect_lock keeps the
 * address-write/data-read pair atomic versus other indirect accesses.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
342
/* Write a mailbox register in indirect mode.
 *
 * The RX return-ring consumer and standard-ring producer mailboxes
 * have dedicated shadow registers in PCI config space and are written
 * there directly.  All other mailboxes go through the generic
 * indirect register window at offset + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
372
/* Read a mailbox register in indirect mode via the generic register
 * window (mailbox space starts at offset 0x5600 in register space).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
384
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: the configured write32 op already
		 * guarantees the write reaches the chip, so no flush read.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: delay before the read-back flush. */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
409
/* Write a mailbox register, then read it back to flush the write —
 * unless the chip needs the write-reorder or ICH workarounds, in
 * which case the read-back is skipped.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
417
/* Write a TX mailbox register.  Chips with the TXD mailbox hardware
 * bug need the value written twice; chips that can reorder mailbox
 * writes need a read-back to flush.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
427
428 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
429 {
430         return (readl(tp->regs + off + GRCMBOX_BASE));
431 }
432
433 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
434 {
435         writel(val, tp->regs + off + GRCMBOX_BASE);
436 }
437
438 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
439 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
440 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
441 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
442 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
443
444 #define tw32(reg,val)           tp->write32(tp, reg, val)
445 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
446 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
447 #define tr32(reg)               tp->read32(tp, reg)
448
/* Write a 32-bit word into NIC-internal SRAM through the memory
 * window.  On the 5906, writes into the statistics-block range are
 * silently dropped (the read path returns 0 for the same range —
 * presumably that SRAM region is not host-accessible on this ASIC).
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Drive the memory window via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Drive the memory window via MMIO (flushed writes). */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
473
/* Read a 32-bit word from NIC-internal SRAM through the memory
 * window; counterpart of tg3_write_mem().  On the 5906 the
 * statistics-block range reads back as 0 (see tg3_write_mem).
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Drive the memory window via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Drive the memory window via MMIO (flushed writes). */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
500
/* Disable chip interrupts: mask the PCI interrupt line in the misc
 * host control register, then write 1 to the interrupt mailbox
 * (flushed) to tell the chip interrupts are being serviced/held off.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
507
/* Force delivery of a pending interrupt.  In non-tagged mode with a
 * status update already posted, assert the interrupt via GRC local
 * control; otherwise kick the host coalescing engine so it re-runs
 * and raises the interrupt if work exists.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
517
/* Re-enable chip interrupts: clear irq_sync (with a write barrier so
 * it is visible before the unmask), unmask the PCI interrupt line,
 * and ack the last status tag via the interrupt mailbox.  1-shot MSI
 * chips need the mailbox written a second time.  Finally force an
 * interrupt in case work is already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
532
533 static inline unsigned int tg3_has_work(struct tg3 *tp)
534 {
535         struct tg3_hw_status *sblk = tp->hw_status;
536         unsigned int work_exists = 0;
537
538         /* check for phy events */
539         if (!(tp->tg3_flags &
540               (TG3_FLAG_USE_LINKCHG_REG |
541                TG3_FLAG_POLL_SERDES))) {
542                 if (sblk->status & SD_STATUS_LINK_CHG)
543                         work_exists = 1;
544         }
545         /* check for RX/TX work to do */
546         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
547             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
548                 work_exists = 1;
549
550         return work_exists;
551 }
552
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Ack the last tag we processed; mmiowb() orders this mailbox
	 * write against any later writes from other CPUs.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
573
/* Quiesce the data path: refresh trans_start so the TX watchdog does
 * not fire while the queue is stopped, then stop NAPI polling and
 * disable the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
580
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated so the next poll
 * runs, and re-enable chip interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
592
/* Switch the core clock to its normal setting, stepping through the
 * ALT clock on pre-5705 chips.  No-op on CPMU-equipped chips (the
 * CPMU manages clocks itself) and on the 5780 class.  Each write uses
 * a 40 usec settle time (see _tw32_flush).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN bits and the low speed field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition through the ALT clock. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
624
625 #define PHY_BUSY_LOOPS  5000
626
/* Read PHY register @reg over the MII management interface.
 *
 * Hardware auto-polling is temporarily turned off so it cannot
 * collide with the manual MI_COM transaction, and restored afterward.
 * On success returns 0 with the 16-bit result in *val; returns -EBUSY
 * if the interface stays busy for PHY_BUSY_LOOPS polls.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register address, read cmd. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears or we give up. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			/* Re-read to latch the returned data field. */
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore the caller's auto-poll configuration. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
675
/* Write @val to PHY register @reg over the MII management interface.
 *
 * On the 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are
 * silently skipped (treated as success).  Auto-polling is disabled
 * around the manual transaction like in tg3_readphy().  Returns 0 on
 * success, -EBUSY if the interface never goes idle.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write cmd. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears or we give up. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore the caller's auto-poll configuration. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
724
/* Enable or disable automatic MDI crossover in the PHY.
 *
 * Only applies to 5705+ copper PHYs (serdes has no MDI-X).  The 5906
 * ephy uses a shadow-register sequence; other PHYs use the auxiliary
 * control register's MISC shadow page.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			/* Open the shadow register window, flip the
			 * MDIX bit, then restore the test register.
			 */
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Select the MISC shadow page, read-modify-write the
		 * auto-MDIX force bit, and set WREN so the write takes.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
762
/* Enable the PHY's "ethernet wirespeed" feature unless the chip is
 * flagged as not supporting it.  Performs a read-modify-write of an
 * auxiliary control shadow register (0x7007 selects the page; bits 15
 * and 4 are set — presumably the wirespeed enable bits, confirm
 * against the Broadcom PHY register spec).
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
775
776 static int tg3_bmcr_reset(struct tg3 *tp)
777 {
778         u32 phy_control;
779         int limit, err;
780
781         /* OK, reset it, and poll the BMCR_RESET bit until it
782          * clears or we time out.
783          */
784         phy_control = BMCR_RESET;
785         err = tg3_writephy(tp, MII_BMCR, phy_control);
786         if (err != 0)
787                 return -EBUSY;
788
789         limit = 5000;
790         while (limit--) {
791                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
792                 if (err != 0)
793                         return -EBUSY;
794
795                 if ((phy_control & BMCR_RESET) == 0) {
796                         udelay(40);
797                         break;
798                 }
799                 udelay(10);
800         }
801         if (limit <= 0)
802                 return -EBUSY;
803
804         return 0;
805 }
806
807 static int tg3_wait_macro_done(struct tg3 *tp)
808 {
809         int limit = 100;
810
811         while (limit--) {
812                 u32 tmp32;
813
814                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
815                         if ((tmp32 & 0x1000) == 0)
816                                 break;
817                 }
818         }
819         if (limit <= 0)
820                 return -EBUSY;
821
822         return 0;
823 }
824
/* Write a fixed test pattern into the PHY's DSP TAP channels and read
 * it back to verify the PHY's internal memory (part of the 5703/4/5
 * PHY reset workaround).
 *
 * For each of the four channels:
 *   1. select the channel and enter write mode (reg 0x16 = 0x0002),
 *   2. push the six pattern words through the DSP read/write port,
 *   3. kick the write macro (0x0202) and wait for completion,
 *   4. switch to read-back mode (0x0082 then 0x0802) and compare each
 *      low/high word pair against the pattern (low masked to 15 bits,
 *      high to 4 bits).
 *
 * On any macro timeout *resetp is set to 1 so the caller performs a
 * fresh BMCR reset before retrying.  On a data miscompare the DSP is
 * poked with the fixed 0x000b/0x4001/0x4005 sequence (purpose not
 * documented here -- presumably re-arms the test engine) and -EBUSY
 * is returned without requesting a reset.
 *
 * Returns 0 if all channels verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel block and enter write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Launch the write macro and wait for it. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back the three low/high word pairs and compare. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
890
891 static int tg3_phy_reset_chanpat(struct tg3 *tp)
892 {
893         int chan;
894
895         for (chan = 0; chan < 4; chan++) {
896                 int i;
897
898                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
899                              (chan * 0x2000) | 0x0200);
900                 tg3_writephy(tp, 0x16, 0x0002);
901                 for (i = 0; i < 6; i++)
902                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
903                 tg3_writephy(tp, 0x16, 0x0202);
904                 if (tg3_wait_macro_done(tp))
905                         return -EBUSY;
906         }
907
908         return 0;
909 }
910
911 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
912 {
913         u32 reg32, phy9_orig;
914         int retries, do_phy_reset, err;
915
916         retries = 10;
917         do_phy_reset = 1;
918         do {
919                 if (do_phy_reset) {
920                         err = tg3_bmcr_reset(tp);
921                         if (err)
922                                 return err;
923                         do_phy_reset = 0;
924                 }
925
926                 /* Disable transmitter and interrupt.  */
927                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
928                         continue;
929
930                 reg32 |= 0x3000;
931                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
932
933                 /* Set full-duplex, 1000 mbps.  */
934                 tg3_writephy(tp, MII_BMCR,
935                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
936
937                 /* Set to master mode.  */
938                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
939                         continue;
940
941                 tg3_writephy(tp, MII_TG3_CTRL,
942                              (MII_TG3_CTRL_AS_MASTER |
943                               MII_TG3_CTRL_ENABLE_AS_MASTER));
944
945                 /* Enable SM_DSP_CLOCK and 6dB.  */
946                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
947
948                 /* Block the PHY control access.  */
949                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
950                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
951
952                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
953                 if (!err)
954                         break;
955         } while (--retries);
956
957         err = tg3_phy_reset_chanpat(tp);
958         if (err)
959                 return err;
960
961         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
962         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
963
964         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
965         tg3_writephy(tp, 0x16, 0x0000);
966
967         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
968             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
969                 /* Set Extended packet length bit for jumbo frames */
970                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
971         }
972         else {
973                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
974         }
975
976         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
977
978         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
979                 reg32 &= ~0x3000;
980                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
981         } else if (!err)
982                 err = -EBUSY;
983
984         return err;
985 }
986
987 static void tg3_link_report(struct tg3 *);
988
/* Reset the tigon3 PHY and re-apply all chip-specific PHY errata
 * workarounds afterwards.  (NOTE(review): the old comment here spoke
 * of a FORCE argument that this function does not have.)
 *
 * Errata handling, driven by chip revision and tg3_flags2:
 *  - 5906: pull the internal EPHY out of IDDQ before touching it and
 *    adjust its output voltage at the end.
 *  - 5703/5704/5705: use the heavyweight reset with DSP test-pattern
 *    verification (tg3_phy_reset_5703_4_5).
 *  - ADC / 5704-A0 / BER / jitter PHY bugs: fixed DSP register pokes.
 *  - Jumbo-capable chips: set the extended-packet-length bit and the
 *    FIFO elasticity bit.
 *
 * Returns 0 on success or a negative errno if the PHY is unreachable.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Take the EPHY out of IDDQ (low-power) mode first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice; MII status bits are latched, so the first
	 * read returns stale latched state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset drops the link; report link-down before doing it. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Fixed DSP poke sequences for the various PHY errata; the
	 * 0x0c00 / 0x0400 writes bracket each sequence by enabling and
	 * then disabling SM_DSP clock access.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	/* Re-enable automatic MDI crossover and wirespeed. */
	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1095
/* Sequence the GPIO pins that control the board's auxiliary (Vaux)
 * power source.
 *
 * On dual-port chips (5704/5714) the aux-power GPIOs are shared
 * between the two PCI functions, so the peer device's WOL/ASF needs
 * are taken into account, and a function defers to its peer once the
 * peer has completed init (early return in the sequences below).
 *
 * If this device or its peer needs power while down (WOL or ASF
 * enabled), the GPIOs are driven with a chip-specific sequence to
 * keep aux power on; otherwise they are sequenced to switch it off.
 * No-op on non-NIC (LOM) configurations.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			/* 5700/5701: single write sets all OE and output
			 * bits at once.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Peer already owns the shared GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			/* GPIO0 is driven high in a separate, later step;
			 * the three-write order matters here.
			 */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			/* Peer already owns the shared GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1 high-low-high to switch aux power
			 * off.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1191
1192 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1193 {
1194         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1195                 return 1;
1196         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1197                 if (speed != SPEED_10)
1198                         return 1;
1199         } else if (speed == SPEED_10)
1200                 return 1;
1201
1202         return 0;
1203 }
1204
1205 static int tg3_setup_phy(struct tg3 *, int);
1206
1207 #define RESET_KIND_SHUTDOWN     0
1208 #define RESET_KIND_INIT         1
1209 #define RESET_KIND_SUSPEND      2
1210
1211 static void tg3_write_sig_post_reset(struct tg3 *, int);
1212 static int tg3_halt_cpu(struct tg3 *, u32);
1213 static int tg3_nvram_lock(struct tg3 *);
1214 static void tg3_nvram_unlock(struct tg3 *);
1215
/* Put the PHY (or serdes block) into its lowest safe power state
 * before the chip itself is powered down.
 *
 *  - 5704 serdes: park the SERDES in HW-autoneg + soft-reset and set
 *    bit 15 of MAC_SERDES_CFG (purpose not documented here); other
 *    serdes chips are left alone.
 *  - 5906: BMCR-reset the EPHY, then put it into IDDQ mode.
 *  - Other copper PHYs: force the LEDs off, write 0x01b2 to the aux
 *    control register (presumably a low-power setting -- not
 *    documented here), then power down via BMCR_PDOWN, except on
 *    chips where powering down the PHY is known to be buggy
 *    (5700, 5704, and 5780 with MII serdes).
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1255
/* Transition the device to the requested PCI power state.
 *
 * D0: write the power state immediately, then switch off aux power on
 * NIC configurations, and return.
 *
 * D1/D2/D3hot: mask PCI interrupts, save the current link config and
 * (on copper) renegotiate down to 10 Mbps half duplex, arm wake-on-LAN
 * (magic packet) if enabled, throttle or disable the various core
 * clocks as the chip family allows, power down the PHY when neither
 * WOL nor ASF needs it, switch aux power, apply the 5750 AX/BX PLL
 * workaround, signal shutdown to the firmware, and finally write the
 * new state into the PCI PM control register.
 *
 * Returns 0 on success, -EINVAL for an unsupported target state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* offset of the PCI PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Clear PME status (write-1-to-clear) and the state field. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the device sleeps. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the link config so it can be restored on resume. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper only: drop to 10 Mbps half duplex while sleeping. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for the bootcode to signal that
		 * the ASF status mailbox is quiesced.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic-packet detection if PME from D3cold is
		 * supported and WOL is requested.
		 */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Keep the receiver running so WOL frames are seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock throttling: chip-family dependent.  CPMU-equipped,
	 * 5780-class and 5906 chips manage their own clocks.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the clock bits in two steps, 40us apart. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Nothing needs the PHY; power it down completely. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1479
1480 static void tg3_link_report(struct tg3 *tp)
1481 {
1482         if (!netif_carrier_ok(tp->dev)) {
1483                 if (netif_msg_link(tp))
1484                         printk(KERN_INFO PFX "%s: Link is down.\n",
1485                                tp->dev->name);
1486         } else if (netif_msg_link(tp)) {
1487                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1488                        tp->dev->name,
1489                        (tp->link_config.active_speed == SPEED_1000 ?
1490                         1000 :
1491                         (tp->link_config.active_speed == SPEED_100 ?
1492                          100 : 10)),
1493                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1494                         "full" : "half"));
1495
1496                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1497                        "%s for RX.\n",
1498                        tp->dev->name,
1499                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1500                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1501         }
1502 }
1503
/* Resolve TX/RX flow control from the local and link-partner pause
 * advertisements and program MAC_RX_MODE / MAC_TX_MODE accordingly.
 *
 * With pause autonegotiation enabled, the PAUSE/ASYM_PAUSE bit
 * combination is resolved per the IEEE 802.3 priority table into the
 * TG3_FLAG_{RX,TX}_PAUSE flags; for MII serdes the 1000BaseX pause
 * bits are first translated to their 1000BaseT equivalents so a
 * single resolution path serves both media.  With autoneg disabled,
 * the flags already configured in tp->tg3_flags are applied as-is.
 * The MAC registers are only rewritten when the mode actually
 * changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Pause resolution: symmetric if both sides advertise
		 * PAUSE; one-directional only via the ASYM combinations.
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			(remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		/* Forced flow control: keep the configured flags. */
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1575
1576 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1577 {
1578         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1579         case MII_TG3_AUX_STAT_10HALF:
1580                 *speed = SPEED_10;
1581                 *duplex = DUPLEX_HALF;
1582                 break;
1583
1584         case MII_TG3_AUX_STAT_10FULL:
1585                 *speed = SPEED_10;
1586                 *duplex = DUPLEX_FULL;
1587                 break;
1588
1589         case MII_TG3_AUX_STAT_100HALF:
1590                 *speed = SPEED_100;
1591                 *duplex = DUPLEX_HALF;
1592                 break;
1593
1594         case MII_TG3_AUX_STAT_100FULL:
1595                 *speed = SPEED_100;
1596                 *duplex = DUPLEX_FULL;
1597                 break;
1598
1599         case MII_TG3_AUX_STAT_1000HALF:
1600                 *speed = SPEED_1000;
1601                 *duplex = DUPLEX_HALF;
1602                 break;
1603
1604         case MII_TG3_AUX_STAT_1000FULL:
1605                 *speed = SPEED_1000;
1606                 *duplex = DUPLEX_FULL;
1607                 break;
1608
1609         default:
1610                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1611                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1612                                  SPEED_10;
1613                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1614                                   DUPLEX_HALF;
1615                         break;
1616                 }
1617                 *speed = SPEED_INVALID;
1618                 *duplex = DUPLEX_INVALID;
1619                 break;
1620         };
1621 }
1622
/* Program the copper PHY's advertisement and control registers
 * according to tp->link_config, then either force the configured
 * speed/duplex (autoneg disabled) or (re)start autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100Mb advertised when wake-on-LAN requires it. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed requested: advertise everything
		 * permitted by link_config.advertising.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 get master mode forced here --
			 * presumably an early-silicon workaround;
			 * TODO confirm against the 5701 errata.
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master-mode special case as
			 * in the autoneg path above.
			 */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			/* 10/100 forced: clear gigabit advertisement
			 * and advertise only the requested mode.
			 */
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		/* Only touch BMCR if the forced mode differs from
		 * what the PHY currently has.
		 */
		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link by putting the PHY into
			 * loopback, and wait (up to 15ms) for the
			 * link to go down before applying the new
			 * forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link status is latched; the
				 * second read returns current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: kick off (or restart) negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1755
1756 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1757 {
1758         int err;
1759
1760         /* Turn off tap power management. */
1761         /* Set Extended packet length bit */
1762         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1763
1764         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1765         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1766
1767         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1768         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1769
1770         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1771         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1772
1773         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1774         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1775
1776         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1777         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1778
1779         udelay(40);
1780
1781         return err;
1782 }
1783
1784 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1785 {
1786         u32 adv_reg, all_mask = 0;
1787
1788         if (mask & ADVERTISED_10baseT_Half)
1789                 all_mask |= ADVERTISE_10HALF;
1790         if (mask & ADVERTISED_10baseT_Full)
1791                 all_mask |= ADVERTISE_10FULL;
1792         if (mask & ADVERTISED_100baseT_Half)
1793                 all_mask |= ADVERTISE_100HALF;
1794         if (mask & ADVERTISED_100baseT_Full)
1795                 all_mask |= ADVERTISE_100FULL;
1796
1797         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1798                 return 0;
1799
1800         if ((adv_reg & all_mask) != all_mask)
1801                 return 0;
1802         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1803                 u32 tg3_ctrl;
1804
1805                 all_mask = 0;
1806                 if (mask & ADVERTISED_1000baseT_Half)
1807                         all_mask |= ADVERTISE_1000HALF;
1808                 if (mask & ADVERTISED_1000baseT_Full)
1809                         all_mask |= ADVERTISE_1000FULL;
1810
1811                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1812                         return 0;
1813
1814                 if ((tg3_ctrl & all_mask) != all_mask)
1815                         return 0;
1816         }
1817         return 1;
1818 }
1819
/* Bring up / re-evaluate the link on a copper PHY: apply chip- and
 * PHY-specific workarounds, read link/autoneg status, resolve flow
 * control, program the MAC for the resulting mode, and propagate
 * carrier changes to the network stack.  Always returns 0 except
 * when a 5401 DSP reload fails.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Acknowledge any pending link-state attention bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched; read twice so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down: reload the 5401 DSP patch and
			 * give the link up to 10ms to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0: if a gigabit link did not come
			 * back, do a full PHY reset and reload the
			 * DSP patch once more.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Configure the PHY interrupt mask.  NOTE(review): bit
	 * polarity of MII_TG3_IMASK assumed from usage; confirm
	 * against tg3.h.
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Select the LED scheme on 5700/5701. */
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of the aux control register (page
		 * 0x4007) is set; if it was clear, set it and force
		 * a full relink.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Wait (up to 4ms) for the PHY to report link up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Poll aux status (up to 20ms) until it reports a
		 * nonzero value.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Retry BMCR while it reads as 0 or 0x7fff -- values
		 * treated as not-yet-valid here.
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: only count the link as up when
			 * the PHY is not autonegotiating and matches
			 * the requested speed/duplex exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	/* Resolve flow control on an autonegotiated full-duplex link. */
	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram the advertisements / restart autoneg,
		 * then check whether the link is already back.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program MAC port mode: MII for 10/100, GMII otherwise
	 * (GMII is also the no-link default).
	 */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	/* 5700 link-polarity quirk, decided by tg3_5700_link_polarity(). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Gigabit link on a 5700 over PCI-X / high-speed PCI: notify
	 * firmware via the mailbox magic value.  NOTE(review): exact
	 * firmware contract inferred from the mailbox write; confirm.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate carrier changes to the stack and log the result. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2096
/* Software state for the fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine).  The MR_* flag names follow IEEE 802.3
 * management-variable conventions -- NOTE(review): inferred from
 * naming; confirm against the state machine's users.
 */
struct tg3_fiber_aneginfo {
	int state;	/* one of the ANEG_STATE_* values below */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control bits and link-partner ability bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters: cur_time advances on every state-machine
	 * invocation; link_time records when the current phase began
	 * and is compared against ANEG_STATE_SETTLE_TIME.
	 */
	unsigned long link_time, cur_time;

	/* Last RX config word seen and how many consecutive times it
	 * repeated; more than one matching read sets ability_match.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Boolean match indicators updated from MAC status each tick. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* raw TX/RX config words (ANEG_CFG_* bits) */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a phase must persist before the state machine advances. */
#define ANEG_STATE_SETTLE_TIME  10000
2160
2161 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2162                                    struct tg3_fiber_aneginfo *ap)
2163 {
2164         unsigned long delta;
2165         u32 rx_cfg_reg;
2166         int ret;
2167
2168         if (ap->state == ANEG_STATE_UNKNOWN) {
2169                 ap->rxconfig = 0;
2170                 ap->link_time = 0;
2171                 ap->cur_time = 0;
2172                 ap->ability_match_cfg = 0;
2173                 ap->ability_match_count = 0;
2174                 ap->ability_match = 0;
2175                 ap->idle_match = 0;
2176                 ap->ack_match = 0;
2177         }
2178         ap->cur_time++;
2179
2180         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2181                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2182
2183                 if (rx_cfg_reg != ap->ability_match_cfg) {
2184                         ap->ability_match_cfg = rx_cfg_reg;
2185                         ap->ability_match = 0;
2186                         ap->ability_match_count = 0;
2187                 } else {
2188                         if (++ap->ability_match_count > 1) {
2189                                 ap->ability_match = 1;
2190                                 ap->ability_match_cfg = rx_cfg_reg;
2191                         }
2192                 }
2193                 if (rx_cfg_reg & ANEG_CFG_ACK)
2194                         ap->ack_match = 1;
2195                 else
2196                         ap->ack_match = 0;
2197
2198                 ap->idle_match = 0;
2199         } else {
2200                 ap->idle_match = 1;
2201                 ap->ability_match_cfg = 0;
2202                 ap->ability_match_count = 0;
2203                 ap->ability_match = 0;
2204                 ap->ack_match = 0;
2205
2206                 rx_cfg_reg = 0;
2207         }
2208
2209         ap->rxconfig = rx_cfg_reg;
2210         ret = ANEG_OK;
2211
2212         switch(ap->state) {
2213         case ANEG_STATE_UNKNOWN:
2214                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2215                         ap->state = ANEG_STATE_AN_ENABLE;
2216
2217                 /* fallthru */
2218         case ANEG_STATE_AN_ENABLE:
2219                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2220                 if (ap->flags & MR_AN_ENABLE) {
2221                         ap->link_time = 0;
2222                         ap->cur_time = 0;
2223                         ap->ability_match_cfg = 0;
2224                         ap->ability_match_count = 0;
2225                         ap->ability_match = 0;
2226                         ap->idle_match = 0;
2227                         ap->ack_match = 0;
2228
2229                         ap->state = ANEG_STATE_RESTART_INIT;
2230                 } else {
2231                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2232                 }
2233                 break;
2234
2235         case ANEG_STATE_RESTART_INIT:
2236                 ap->link_time = ap->cur_time;
2237                 ap->flags &= ~(MR_NP_LOADED);
2238                 ap->txconfig = 0;
2239                 tw32(MAC_TX_AUTO_NEG, 0);
2240                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2241                 tw32_f(MAC_MODE, tp->mac_mode);
2242                 udelay(40);
2243
2244                 ret = ANEG_TIMER_ENAB;
2245                 ap->state = ANEG_STATE_RESTART;
2246
2247                 /* fallthru */
2248         case ANEG_STATE_RESTART:
2249                 delta = ap->cur_time - ap->link_time;
2250                 if (delta > ANEG_STATE_SETTLE_TIME) {
2251                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2252                 } else {
2253                         ret = ANEG_TIMER_ENAB;
2254                 }
2255                 break;
2256
2257         case ANEG_STATE_DISABLE_LINK_OK:
2258                 ret = ANEG_DONE;
2259                 break;
2260
2261         case ANEG_STATE_ABILITY_DETECT_INIT:
2262                 ap->flags &= ~(MR_TOGGLE_TX);
2263                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2264                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2265                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2266                 tw32_f(MAC_MODE, tp->mac_mode);
2267                 udelay(40);
2268
2269                 ap->state = ANEG_STATE_ABILITY_DETECT;
2270                 break;
2271
2272         case ANEG_STATE_ABILITY_DETECT:
2273                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2274                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2275                 }
2276                 break;
2277
2278         case ANEG_STATE_ACK_DETECT_INIT:
2279                 ap->txconfig |= ANEG_CFG_ACK;
2280                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2281                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2282                 tw32_f(MAC_MODE, tp->mac_mode);
2283                 udelay(40);
2284
2285                 ap->state = ANEG_STATE_ACK_DETECT;
2286
2287                 /* fallthru */
2288         case ANEG_STATE_ACK_DETECT:
2289                 if (ap->ack_match != 0) {
2290                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2291                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2292                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2293                         } else {
2294                                 ap->state = ANEG_STATE_AN_ENABLE;
2295                         }
2296                 } else if (ap->ability_match != 0 &&
2297                            ap->rxconfig == 0) {
2298                         ap->state = ANEG_STATE_AN_ENABLE;
2299                 }
2300                 break;
2301
2302         case ANEG_STATE_COMPLETE_ACK_INIT:
2303                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2304                         ret = ANEG_FAILED;
2305                         break;
2306                 }
2307                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2308                                MR_LP_ADV_HALF_DUPLEX |
2309                                MR_LP_ADV_SYM_PAUSE |
2310                                MR_LP_ADV_ASYM_PAUSE |
2311                                MR_LP_ADV_REMOTE_FAULT1 |
2312                                MR_LP_ADV_REMOTE_FAULT2 |
2313                                MR_LP_ADV_NEXT_PAGE |
2314                                MR_TOGGLE_RX |
2315                                MR_NP_RX);
2316                 if (ap->rxconfig & ANEG_CFG_FD)
2317                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2318                 if (ap->rxconfig & ANEG_CFG_HD)
2319                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2320                 if (ap->rxconfig & ANEG_CFG_PS1)
2321                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2322                 if (ap->rxconfig & ANEG_CFG_PS2)
2323                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2324                 if (ap->rxconfig & ANEG_CFG_RF1)
2325                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2326                 if (ap->rxconfig & ANEG_CFG_RF2)
2327                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2328                 if (ap->rxconfig & ANEG_CFG_NP)
2329                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2330
2331                 ap->link_time = ap->cur_time;
2332
2333                 ap->flags ^= (MR_TOGGLE_TX);
2334                 if (ap->rxconfig & 0x0008)
2335                         ap->flags |= MR_TOGGLE_RX;
2336                 if (ap->rxconfig & ANEG_CFG_NP)
2337                         ap->flags |= MR_NP_RX;
2338                 ap->flags |= MR_PAGE_RX;
2339
2340                 ap->state = ANEG_STATE_COMPLETE_ACK;
2341                 ret = ANEG_TIMER_ENAB;
2342                 break;
2343
2344         case ANEG_STATE_COMPLETE_ACK:
2345                 if (ap->ability_match != 0 &&
2346                     ap->rxconfig == 0) {
2347                         ap->state = ANEG_STATE_AN_ENABLE;
2348                         break;
2349                 }
2350                 delta = ap->cur_time - ap->link_time;
2351                 if (delta > ANEG_STATE_SETTLE_TIME) {
2352                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2353                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2354                         } else {
2355                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2356                                     !(ap->flags & MR_NP_RX)) {
2357                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2358                                 } else {
2359                                         ret = ANEG_FAILED;
2360                                 }
2361                         }
2362                 }
2363                 break;
2364
2365         case ANEG_STATE_IDLE_DETECT_INIT:
2366                 ap->link_time = ap->cur_time;
2367                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2368                 tw32_f(MAC_MODE, tp->mac_mode);
2369                 udelay(40);
2370
2371                 ap->state = ANEG_STATE_IDLE_DETECT;
2372                 ret = ANEG_TIMER_ENAB;
2373                 break;
2374
2375         case ANEG_STATE_IDLE_DETECT:
2376                 if (ap->ability_match != 0 &&
2377                     ap->rxconfig == 0) {
2378                         ap->state = ANEG_STATE_AN_ENABLE;
2379                         break;
2380                 }
2381                 delta = ap->cur_time - ap->link_time;
2382                 if (delta > ANEG_STATE_SETTLE_TIME) {
2383                         /* XXX another gem from the Broadcom driver :( */
2384                         ap->state = ANEG_STATE_LINK_OK;
2385                 }
2386                 break;
2387
2388         case ANEG_STATE_LINK_OK:
2389                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2390                 ret = ANEG_DONE;
2391                 break;
2392
2393         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2394                 /* ??? unimplemented */
2395                 break;
2396
2397         case ANEG_STATE_NEXT_PAGE_WAIT:
2398                 /* ??? unimplemented */
2399                 break;
2400
2401         default:
2402                 ret = ANEG_FAILED;
2403                 break;
2404         };
2405
2406         return ret;
2407 }
2408
/* Run the software 1000BASE-X autoneg state machine to completion.
 *
 * Puts the MAC into GMII port mode and enables transmission of config
 * code words, then polls tg3_fiber_aneg_smachine() about once per
 * microsecond (bounded to ~195 ms) until it reports ANEG_DONE or
 * ANEG_FAILED.  The MR_* result flags from the state machine are
 * stored in *flags for the caller.
 *
 * Returns 1 if the state machine finished (ANEG_DONE) with any of
 * MR_AN_COMPLETE, MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX set in the
 * result flags, 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear any stale TX config word before starting. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	/* Start sending config code words to the link partner. */
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* 195000 iterations at 1 us apiece bounds the wait to ~195 ms. */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Stop transmitting config code words. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*flags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
2452
/* Initialize the BCM8002 SerDes PHY.
 *
 * The register numbers and values below are opaque vendor settings;
 * the sequence and the delays are order-critical and must not be
 * rearranged.  Skipped when the device is already initialized and
 * there is no PCS sync (no link to re-train).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete (5 ms busy-wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize (150 ms busy-wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2502
2503 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2504 {
2505         u32 sg_dig_ctrl, sg_dig_status;
2506         u32 serdes_cfg, expected_sg_dig_ctrl;
2507         int workaround, port_a;
2508         int current_link_up;
2509
2510         serdes_cfg = 0;
2511         expected_sg_dig_ctrl = 0;
2512         workaround = 0;
2513         port_a = 1;
2514         current_link_up = 0;
2515
2516         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2517             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2518                 workaround = 1;
2519                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2520                         port_a = 0;
2521
2522                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2523                 /* preserve bits 20-23 for voltage regulator */
2524                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2525         }
2526
2527         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2528
2529         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2530                 if (sg_dig_ctrl & (1 << 31)) {
2531                         if (workaround) {
2532                                 u32 val = serdes_cfg;
2533
2534                                 if (port_a)
2535                                         val |= 0xc010000;
2536                                 else
2537                                         val |= 0x4010000;
2538                                 tw32_f(MAC_SERDES_CFG, val);
2539                         }
2540                         tw32_f(SG_DIG_CTRL, 0x01388400);
2541                 }
2542                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2543                         tg3_setup_flow_control(tp, 0, 0);
2544                         current_link_up = 1;
2545                 }
2546                 goto out;
2547         }
2548
2549         /* Want auto-negotiation.  */
2550         expected_sg_dig_ctrl = 0x81388400;
2551
2552         /* Pause capability */
2553         expected_sg_dig_ctrl |= (1 << 11);
2554
2555         /* Asymettric pause */
2556         expected_sg_dig_ctrl |= (1 << 12);
2557
2558         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2559                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2560                     tp->serdes_counter &&
2561                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2562                                     MAC_STATUS_RCVD_CFG)) ==
2563                      MAC_STATUS_PCS_SYNCED)) {
2564                         tp->serdes_counter--;
2565                         current_link_up = 1;
2566                         goto out;
2567                 }
2568 restart_autoneg:
2569                 if (workaround)
2570                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2571                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2572                 udelay(5);
2573                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2574
2575                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2576                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2577         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2578                                  MAC_STATUS_SIGNAL_DET)) {
2579                 sg_dig_status = tr32(SG_DIG_STATUS);
2580                 mac_status = tr32(MAC_STATUS);
2581
2582                 if ((sg_dig_status & (1 << 1)) &&
2583                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2584                         u32 local_adv, remote_adv;
2585
2586                         local_adv = ADVERTISE_PAUSE_CAP;
2587                         remote_adv = 0;
2588                         if (sg_dig_status & (1 << 19))
2589                                 remote_adv |= LPA_PAUSE_CAP;
2590                         if (sg_dig_status & (1 << 20))
2591                                 remote_adv |= LPA_PAUSE_ASYM;
2592
2593                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2594                         current_link_up = 1;
2595                         tp->serdes_counter = 0;
2596                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2597                 } else if (!(sg_dig_status & (1 << 1))) {
2598                         if (tp->serdes_counter)
2599                                 tp->serdes_counter--;
2600                         else {
2601                                 if (workaround) {
2602                                         u32 val = serdes_cfg;
2603
2604                                         if (port_a)
2605                                                 val |= 0xc010000;
2606                                         else
2607                                                 val |= 0x4010000;
2608
2609                                         tw32_f(MAC_SERDES_CFG, val);
2610                                 }
2611
2612                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2613                                 udelay(40);
2614
2615                                 /* Link parallel detection - link is up */
2616                                 /* only if we have PCS_SYNC and not */
2617                                 /* receiving config code words */
2618                                 mac_status = tr32(MAC_STATUS);
2619                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2620                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2621                                         tg3_setup_flow_control(tp, 0, 0);
2622                                         current_link_up = 1;
2623                                         tp->tg3_flags2 |=
2624                                                 TG3_FLG2_PARALLEL_DETECT;
2625                                         tp->serdes_counter =
2626                                                 SERDES_PARALLEL_DET_TIMEOUT;
2627                                 } else
2628                                         goto restart_autoneg;
2629                         }
2630                 }
2631         } else {
2632                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2633                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2634         }
2635
2636 out:
2637         return current_link_up;
2638 }
2639
/* Bring up a fiber link without the SG-DIG hardware autoneg block,
 * using the software state machine via fiber_autoneg(), or forcing
 * the link up when autoneg is disabled.  @mac_status is the
 * MAC_STATUS value sampled by the caller.  Returns nonzero when the
 * link is up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is no usable signal at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* We only advertise symmetric pause; map the
			 * partner's MR_* flags to LPA_* bits for flow
			 * control resolution.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config change bits until they stay clear
		 * (at most 30 tries, ~60 us each).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but we still have PCS sync and the
		 * partner is not sending config code words: treat the
		 * link as up.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly assert SEND_CONFIGS, then restore mac_mode. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
2696
/* Link setup entry point for TBI (SerDes) fiber interfaces.
 *
 * Programs the MAC for TBI port mode, runs either hardware (SG-DIG)
 * or software autoneg, then updates link_config, the link LED and the
 * netif carrier state, reporting link changes.  Always returns 0.
 * (force_reset is accepted for signature parity with the other
 * tg3_setup_*_phy() routines but is not used here.)
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot pre-call state so only real changes are reported. */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path for software autoneg: if the link is already up
	 * and stable (PCS synced, signal detected, no config change
	 * and no config words being received), just ack the change
	 * bits and keep the current configuration.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	/* Clear any stale TX config word. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated with the link-change bit
	 * cleared, since we have just handled the link state.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config/link-state change bits until they stay
	 * clear (at most 100 tries, 5 us apart).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* No PCS sync and autoneg timed out: briefly pulse
		 * SEND_CONFIGS to restart config word transmission.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* A fiber link is always reported as 1000FD when up; drive
	 * the link LED to match.
	 */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Sync carrier state with the stack; report a change in link
	 * state, or in pause/speed/duplex while the link persisted.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2808
/* Link setup for MII-managed SerDes interfaces (e.g. 5714S).
 *
 * The SerDes is driven through standard MII registers using the
 * 1000BASE-X (ADVERTISE_1000X*) advertisement bits.  Handles three
 * modes: parallel-detect (leave registers alone, just check link),
 * autoneg (program advertisement, restart AN if needed), and forced
 * speed/duplex (rewrite BMCR, forcing a link-down first if the link
 * was up).  Returns the accumulated tg3_readphy() error status.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status change bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice so the second read reflects current state
	 * (BMSR latches link-down events).
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 the MAC TX status is used as the link
		 * indication, overriding BMSR_LSTATUS.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from
		 * link_config.advertising.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* If the advertisement changed or autoneg was off,
		 * (re)start autoneg now and return; the result is
		 * picked up on a later call.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Clear the 1000X advertisement and
				 * restart autoneg so the partner
				 * drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Double-read of latched BMSR, as above. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex and flow control from the
			 * intersection of both advertisements; no
			 * common 1000X ability means no valid link.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Sync carrier state with the stack and report changes;
	 * dropping the link also clears parallel-detect state.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2975
/* Software parallel detection for MII SerDes links.
 *
 * While the autoneg timeout (tp->serdes_counter) is running, just
 * count it down.  Once it expires:
 *  - with no link and autoneg enabled, force a 1000FD link when the
 *    PHY reports signal detect but no incoming config code words;
 *  - with a parallel-detected link up, re-enable autoneg as soon as
 *    config code words start arriving again.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice; presumably the first read
			 * returns latched state -- TODO confirm
			 * against the PHY datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3033
3034 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3035 {
3036         int err;
3037
3038         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3039                 err = tg3_setup_fiber_phy(tp, force_reset);
3040         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3041                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3042         } else {
3043                 err = tg3_setup_copper_phy(tp, force_reset);
3044         }
3045
3046         if (tp->link_config.active_speed == SPEED_1000 &&
3047             tp->link_config.active_duplex == DUPLEX_HALF)
3048                 tw32(MAC_TX_LENGTHS,
3049                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3050                       (6 << TX_LENGTHS_IPG_SHIFT) |
3051                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3052         else
3053                 tw32(MAC_TX_LENGTHS,
3054                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3055                       (6 << TX_LENGTHS_IPG_SHIFT) |
3056                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3057
3058         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3059                 if (netif_carrier_ok(tp->dev)) {
3060                         tw32(HOSTCC_STAT_COAL_TICKS,
3061                              tp->coal.stats_block_coalesce_usecs);
3062                 } else {
3063                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3064                 }
3065         }
3066
3067         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3068                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3069                 if (!netif_carrier_ok(tp->dev))
3070                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3071                               tp->pwrmgmt_thresh;
3072                 else
3073                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3074                 tw32(PCIE_PWR_MGMT_THRESH, val);
3075         }
3076
3077         return err;
3078 }
3079
3080 /* This is called whenever we suspect that the system chipset is re-
3081  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3082  * is bogus tx completions. We try to recover by setting the
3083  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3084  * in the workqueue.
3085  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Recovery only makes sense if the write-reorder workaround
	 * (flushing mailbox writers) is not already in effect.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery; tg3_reset_task() switches the
	 * mailbox writers to their flushing variants and resets the chip.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3100
3101 static inline u32 tg3_tx_avail(struct tg3 *tp)
3102 {
3103         smp_mb();
3104         return (tp->tx_pending -
3105                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3106 }
3107
3108 /* Tigon3 never reports partial packet sends.  So we do not
3109  * need special logic to handle SKBs that have not had all
3110  * of their frags sent yet, like SunGEM does.
3111  */
/* Reclaim completed TX descriptors up to the hardware consumer index,
 * unmapping their DMA buffers and freeing the skbs.  Wakes the TX
 * queue once enough descriptors are free.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb here means our bookkeeping disagrees with
		 * the hardware index — treat it as mailbox reordering.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each fragment; only the first descriptor of a
		 * packet owns the skb pointer, the rest must be NULL.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		/* Re-check under the TX lock to avoid racing with a
		 * concurrent tg3_start_xmit() stopping the queue.
		 */
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3175
3176 /* Returns size of skb allocated or < 0 on error.
3177  *
3178  * We only need to fill in the address because the other members
3179  * of the RX descriptor are invariant, see tg3_init_rings.
3180  *
3181  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3182  * posting buffers we only dirty the first cache line of the RX
3183  * descriptor (containing the address).  Whereas for the RX status
3184  * buffers the cpu only reads the last cacheline of the RX descriptor
3185  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3186  */
3187 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3188                             int src_idx, u32 dest_idx_unmasked)
3189 {
3190         struct tg3_rx_buffer_desc *desc;
3191         struct ring_info *map, *src_map;
3192         struct sk_buff *skb;
3193         dma_addr_t mapping;
3194         int skb_size, dest_idx;
3195
3196         src_map = NULL;
3197         switch (opaque_key) {
3198         case RXD_OPAQUE_RING_STD:
3199                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3200                 desc = &tp->rx_std[dest_idx];
3201                 map = &tp->rx_std_buffers[dest_idx];
3202                 if (src_idx >= 0)
3203                         src_map = &tp->rx_std_buffers[src_idx];
3204                 skb_size = tp->rx_pkt_buf_sz;
3205                 break;
3206
3207         case RXD_OPAQUE_RING_JUMBO:
3208                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3209                 desc = &tp->rx_jumbo[dest_idx];
3210                 map = &tp->rx_jumbo_buffers[dest_idx];
3211                 if (src_idx >= 0)
3212                         src_map = &tp->rx_jumbo_buffers[src_idx];
3213                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3214                 break;
3215
3216         default:
3217                 return -EINVAL;
3218         };
3219
3220         /* Do not overwrite any of the map or rp information
3221          * until we are sure we can commit to a new buffer.
3222          *
3223          * Callers depend upon this behavior and assume that
3224          * we leave everything unchanged if we fail.
3225          */
3226         skb = netdev_alloc_skb(tp->dev, skb_size);
3227         if (skb == NULL)
3228                 return -ENOMEM;
3229
3230         skb_reserve(skb, tp->rx_offset);
3231
3232         mapping = pci_map_single(tp->pdev, skb->data,
3233                                  skb_size - tp->rx_offset,
3234                                  PCI_DMA_FROMDEVICE);
3235
3236         map->skb = skb;
3237         pci_unmap_addr_set(map, mapping, mapping);
3238
3239         if (src_map != NULL)
3240                 src_map->skb = NULL;
3241
3242         desc->addr_hi = ((u64)mapping >> 32);
3243         desc->addr_lo = ((u64)mapping & 0xffffffff);
3244
3245         return skb_size;
3246 }
3247
3248 /* We only need to move over in the address because the other
3249  * members of the RX descriptor are invariant.  See notes above
3250  * tg3_alloc_rx_skb for full details.
3251  */
3252 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3253                            int src_idx, u32 dest_idx_unmasked)
3254 {
3255         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3256         struct ring_info *src_map, *dest_map;
3257         int dest_idx;
3258
3259         switch (opaque_key) {
3260         case RXD_OPAQUE_RING_STD:
3261                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3262                 dest_desc = &tp->rx_std[dest_idx];
3263                 dest_map = &tp->rx_std_buffers[dest_idx];
3264                 src_desc = &tp->rx_std[src_idx];
3265                 src_map = &tp->rx_std_buffers[src_idx];
3266                 break;
3267
3268         case RXD_OPAQUE_RING_JUMBO:
3269                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3270                 dest_desc = &tp->rx_jumbo[dest_idx];
3271                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3272                 src_desc = &tp->rx_jumbo[src_idx];
3273                 src_map = &tp->rx_jumbo_buffers[src_idx];
3274                 break;
3275
3276         default:
3277                 return;
3278         };
3279
3280         dest_map->skb = src_map->skb;
3281         pci_unmap_addr_set(dest_map, mapping,
3282                            pci_unmap_addr(src_map, mapping));
3283         dest_desc->addr_hi = src_desc->addr_hi;
3284         dest_desc->addr_lo = src_desc->addr_lo;
3285
3286         src_map->skb = NULL;
3287 }
3288
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the hw-accel VLAN path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3295
3296 /* The RX ring scheme is composed of multiple rings which post fresh
3297  * buffers to the chip, and one special ring the chip uses to report
3298  * status back to the host.
3299  *
3300  * The special ring reports the status of received packets to the
3301  * host.  The chip does not write into the original descriptor the
3302  * RX buffer was obtained from.  The chip simply takes the original
3303  * descriptor as provided by the host, updates the status and length
3304  * field, then writes this into the next status ring entry.
3305  *
3306  * Each ring the host uses to post buffers to the chip is described
3307  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3308  * it is first placed into the on-chip ram.  When the packet's length
3309  * is known, it walks down the TG3_BDINFO entries to select the ring.
3310  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3311  * which is within the range of the new packet's length is chosen.
3312  *
3313  * The "separate ring for rx status" scheme may sound queer, but it makes
3314  * sense from a cache coherency perspective.  If only the host writes
3315  * to the buffer post rings, and only the chip writes to the rx status
3316  * rings, then cache lines never move beyond shared-modified state.
3317  * If both the host and chip were to write into the same ring, cache line
3318  * eviction could occur since both entities want it in an exclusive state.
3319  */
/* Walk the RX return ring, handing up to @budget completed packets to
 * the stack.  Large packets keep their original buffer (a fresh one is
 * posted in its place); small packets are copied into a new skb and the
 * original buffer is recycled.  Returns the number of packets received.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring (and
		 * slot) this completion came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Post a replacement buffer before handing the
			 * original skb to the stack; on failure, drop
			 * and recycle so the ring never loses a buffer.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and give
			 * the original buffer straight back to the ring.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip saw a
		 * valid TCP/UDP checksum (0xffff after folding).
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about newly posted std-ring
		 * buffers so it never runs dry during a long poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3475
/* NAPI poll callback: handle link-change events, reclaim TX completions
 * and receive up to @budget packets.  Returns the number of RX packets
 * processed; re-enables chip interrupts once all work is done.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	struct net_device *netdev = tp->dev;
	struct tg3_hw_status *sblk = tp->hw_status;
	int work_done = 0;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit but keep UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			/* tg3_tx() detected mailbox write reordering;
			 * stop polling and let the reset task recover.
			 */
			netif_rx_complete(netdev, napi);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done = tg3_rx(tp, budget);

	/* Record the status tag (tagged mode) or clear the UPDATED bit
	 * (legacy mode) to acknowledge this status block.
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	if (!tg3_has_work(tp)) {
		netif_rx_complete(netdev, napi);
		tg3_restart_ints(tp);
	}

	return work_done;
}
3527
/* Tell the IRQ handler to do no further work and wait for any handler
 * currently running on another CPU to finish.  The caller clears
 * tp->irq_sync later to resume normal interrupt processing.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make the flag visible before waiting on in-flight handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3537
3538 static inline int tg3_irq_sync(struct tg3 *tp)
3539 {
3540         return tp->irq_sync;
3541 }
3542
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.  Paired with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3554
/* Release the lock taken by tg3_full_lock().  Note this does not clear
 * tp->irq_sync; callers that quiesced IRQs reset that flag themselves
 * (see tg3_restart_hw).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3559
3560 /* One-shot MSI handler - Chip automatically disables interrupt
3561  * after sending MSI so driver doesn't have to do it.
3562  */
3563 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3564 {
3565         struct net_device *dev = dev_id;
3566         struct tg3 *tp = netdev_priv(dev);
3567
3568         prefetch(tp->hw_status);
3569         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3570
3571         if (likely(!tg3_irq_sync(tp)))
3572                 netif_rx_schedule(dev, &tp->napi);
3573
3574         return IRQ_HANDLED;
3575 }
3576
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Skip scheduling NAPI while another context is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3601
/* Legacy INTx interrupt handler (non-tagged status blocks). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3650
/* INTx interrupt handler for chips using tagged status blocks; a new
 * status tag (vs. tp->last_tag) indicates the interrupt is ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3698
3699 /* ISR for interrupt test */
3700 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3701 {
3702         struct net_device *dev = dev_id;
3703         struct tg3 *tp = netdev_priv(dev);
3704         struct tg3_hw_status *sblk = tp->hw_status;
3705
3706         if ((sblk->status & SD_STATUS_UPDATED) ||
3707             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3708                 tg3_disable_ints(tp);
3709                 return IRQ_RETVAL(1);
3710         }
3711         return IRQ_RETVAL(0);
3712 }
3713
3714 static int tg3_init_hw(struct tg3 *, int);
3715 static int tg3_halt(struct tg3 *, int, int);
3716
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  Returns 0 on success; on failure the
 * chip is halted, the device is closed, and the tg3_init_hw() error is
 * returned.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Drop the full lock around dev_close() — NOTE(review):
		 * presumably because dev_close() re-enters driver paths
		 * that take tp->lock; re-acquire afterwards so the
		 * caller's locking state is unchanged.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3738
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: drive the device by invoking the INTx handler directly. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *priv = netdev_priv(dev);

	tg3_interrupt(priv->pdev->irq, dev);
}
#endif
3747
/* Process-context worker that fully resets and re-initializes the chip
 * after a failure (TX timeout, mailbox write-reorder recovery, etc).
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Nothing to do if the interface was brought down meanwhile. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* Stop NAPI/TX activity without the lock held, then re-take it
	 * with IRQ quiescing for the actual reset.
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* Switch the mailbox writers to their flushing variants
		 * to defeat chipset write reordering (see tg3_tx_recover).
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3788
/* Print a minimal MAC/DMA status snapshot to aid TX-timeout debugging. */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3796
/* net_device watchdog hook: log state and schedule a full chip reset. */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	/* Perform the reset from process context via the reset task. */
	schedule_work(&tp->reset_task);
}
3809
3810 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3811 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3812 {
3813         u32 base = (u32) mapping & 0xffffffff;
3814
3815         return ((base > 0xffffdcc0) &&
3816                 (base + len + 8 < base));
3817 }
3818
3819 /* Test for DMA addresses > 40-bit */
3820 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3821                                           int len)
3822 {
3823 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3824         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3825                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3826         return 0;
3827 #else
3828         return 0;
3829 #endif
3830 }
3831
3832 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3833
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize the offending skb into a freshly allocated copy so it can
 * be described by a single DMA mapping, install that mapping in the
 * descriptor at *start, and unmap/clear the stale sw ring entries in
 * [*start, last_plus_one) that were set up for the original skb.
 *
 * Returns 0 on success (*start advanced past the new descriptor) or
 * -1 on allocation/boundary failure; in both cases the original skb
 * is consumed, so on failure the caller silently drops the packet.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                                       u32 last_plus_one, u32 *start,
                                       u32 base_flags, u32 mss)
{
        struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
        dma_addr_t new_addr = 0;
        u32 entry = *start;
        int i, ret = 0;

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                entry = *start;
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
                if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        ret = -1;
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
                } else {
                        tg3_set_txd(tp, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                        *start = NEXT_TX(entry);
                }
        }

        /* Now clean up the sw ring entries.  Entry 0 was the linear
         * head (mapped with pci_map_single); subsequent entries were
         * page fragments.  The head slot is repointed at new_skb (or
         * NULL on failure); all other slots are cleared.
         */
        i = 0;
        while (entry != last_plus_one) {
                int len;

                if (i == 0)
                        len = skb_headlen(skb);
                else
                        len = skb_shinfo(skb)->frags[i-1].size;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
                                 len, PCI_DMA_TODEVICE);
                if (i == 0) {
                        tp->tx_buffers[entry].skb = new_skb;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
                } else {
                        tp->tx_buffers[entry].skb = NULL;
                }
                entry = NEXT_TX(entry);
                i++;
        }

        /* Original skb is consumed regardless of outcome. */
        dev_kfree_skb(skb);

        return ret;
}
3891
3892 static void tg3_set_txd(struct tg3 *tp, int entry,
3893                         dma_addr_t mapping, int len, u32 flags,
3894                         u32 mss_and_is_end)
3895 {
3896         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3897         int is_end = (mss_and_is_end & 0x1);
3898         u32 mss = (mss_and_is_end >> 1);
3899         u32 vlan_tag = 0;
3900
3901         if (is_end)
3902                 flags |= TXD_FLAG_END;
3903         if (flags & TXD_FLAG_VLAN) {
3904                 vlan_tag = flags >> 16;
3905                 flags &= 0xffff;
3906         }
3907         vlan_tag |= (mss << TXD_MSS_SHIFT);
3908
3909         txd->addr_hi = ((u64) mapping >> 32);
3910         txd->addr_lo = ((u64) mapping & 0xffffffff);
3911         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3912         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3913 }
3914
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Fast TX path: maps the skb head and each page fragment, writes one
 * descriptor per mapping, then kicks the chip via the TX mailbox.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        /* TSO: fix up IP/TCP headers and encode header length into mss
         * so the hardware can replicate/adjust headers per segment.
         */
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* Header is about to be modified; unshare it first. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Hardware recomputes these per segment. */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        /* Only the head slot holds the skb pointer. */
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        /* Stop the queue if a maximally-fragmented packet would no
         * longer fit; re-check avail to close the race with reclaim
         * running concurrently in softirq context.
         */
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4033
4034 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4035
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * The skb is segmented in software (TSO disabled) and each resulting
 * segment is pushed through tg3_start_xmit_dma_bug individually.
 * Consumes @skb on all paths except the NETDEV_TX_BUSY early return.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
                netif_stop_queue(tp->dev);
                /* Re-check after stopping; reclaim may have freed space. */
                if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
                        return NETDEV_TX_BUSY;

                netif_wake_queue(tp->dev);
        }

        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (unlikely(IS_ERR(segs)))
                goto tg3_tso_bug_end;

        /* Detach each segment from the list and transmit it on its own. */
        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
4068
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit, but every DMA mapping is checked against the
 * 4GB-crossing and >40-bit address hardware bugs; an offending packet
 * is linearized via tigon3_dma_hwbug_workaround before the producer
 * index is advanced.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
        mss = 0;
        /* TSO setup: fix up headers and encode header-length hints for
         * the hardware or firmware TSO engine.
         */
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Header is about to be modified; unshare it first. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Headers over 80 bytes trip a TSO hardware bug on some
                 * chips; fall back to software GSO for those packets.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        /* HW TSO computes checksums itself. */
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO needs the pseudo-header checksum
                         * pre-seeded in the TCP header.
                         */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode IP/TCP option lengths; the field position in
                 * the descriptor differs between chip generations.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        /* Flag any mapping that would hit either DMA bug;
                         * the whole packet is redone below if so.
                         */
                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the first descriptor written for this skb. */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        /* Stop the queue if a maximally-fragmented packet would no
         * longer fit; re-check avail to close the race with reclaim.
         */
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4242
4243 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4244                                int new_mtu)
4245 {
4246         dev->mtu = new_mtu;
4247
4248         if (new_mtu > ETH_DATA_LEN) {
4249                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4250                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4251                         ethtool_op_set_tso(dev, 0);
4252                 }
4253                 else
4254                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4255         } else {
4256                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4257                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4258                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4259         }
4260 }
4261
/* net_device change_mtu callback.
 *
 * Validates the requested MTU, then (if the interface is up) stops
 * traffic, halts and reinitializes the chip with the new MTU so the
 * RX ring geometry matches.  Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        /* Quiesce NAPI/TX before taking the full lock and resetting. */
        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        /* Only resume the stack if the hardware came back up. */
        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        return err;
}
4295
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Standard RX ring: unmap and free every posted buffer. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Jumbo RX ring: same treatment with the jumbo buffer size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* TX ring: each packet occupies one head slot (mapped with
         * pci_map_single) followed by one slot per page fragment
         * (mapped with pci_map_page); only the head slot holds the
         * skb pointer.  Walk packet by packet, unmapping every slot.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;
                int j;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(txp, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);
                txp->skb = NULL;

                i++;

                for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
                        /* Fragment slots may wrap past the ring end. */
                        txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(txp, mapping),
                                       skb_shinfo(skb)->frags[j].size,
                                       PCI_DMA_TODEVICE);
                        i++;
                }

                dev_kfree_skb_any(skb);
        }
}
4367
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even a single RX buffer could
 * be allocated.  On partial allocation failure the pending counts are
 * shrunk to what was actually allocated and 0 is still returned.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips use oversized standard buffers instead of a
         * separate jumbo ring when running with a jumbo MTU.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
4457
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases every DMA-coherent ring/status/stats area and the kmalloc'd
 * software ring-info arrays.  Each pointer is NULLed after freeing so
 * the function is safe to call on a partially allocated tg3 (it is the
 * error-path cleanup for tg3_alloc_consistent).
 */
static void tg3_free_consistent(struct tg3 *tp)
{
        /* rx_jumbo_buffers and tx_buffers live inside this single
         * allocation (see tg3_alloc_consistent), so one kfree covers
         * all three software arrays.
         */
        kfree(tp->rx_std_buffers);
        tp->rx_std_buffers = NULL;
        if (tp->rx_std) {
                pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                    tp->rx_std, tp->rx_std_mapping);
                tp->rx_std = NULL;
        }
        if (tp->rx_jumbo) {
                pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                    tp->rx_jumbo, tp->rx_jumbo_mapping);
                tp->rx_jumbo = NULL;
        }
        if (tp->rx_rcb) {
                pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                    tp->rx_rcb, tp->rx_rcb_mapping);
                tp->rx_rcb = NULL;
        }
        if (tp->tx_ring) {
                pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
                        tp->tx_ring, tp->tx_desc_mapping);
                tp->tx_ring = NULL;
        }
        if (tp->hw_status) {
                pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
                                    tp->hw_status, tp->status_mapping);
                tp->hw_status = NULL;
        }
        if (tp->hw_stats) {
                pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
                                    tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
}
4497
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the software ring-info arrays (one kzalloc carved into
 * std RX, jumbo RX and TX pieces) plus all DMA-coherent descriptor
 * rings, the status block and the statistics block.  Returns 0 on
 * success or -ENOMEM after unwinding via tg3_free_consistent.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
                                      (TG3_RX_RING_SIZE +
                                       TG3_RX_JUMBO_RING_SIZE)) +
                                     (sizeof(struct tx_ring_info) *
                                      TG3_TX_RING_SIZE),
                                     GFP_KERNEL);
        if (!tp->rx_std_buffers)
                return -ENOMEM;

        /* Carve the jumbo and tx arrays out of the single allocation. */
        tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
        tp->tx_buffers = (struct tx_ring_info *)
                &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

        tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                          &tp->rx_std_mapping);
        if (!tp->rx_std)
                goto err_out;

        tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                            &tp->rx_jumbo_mapping);

        if (!tp->rx_jumbo)
                goto err_out;

        tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                          &tp->rx_rcb_mapping);
        if (!tp->rx_rcb)
                goto err_out;

        tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
                                           &tp->tx_desc_mapping);
        if (!tp->tx_ring)
                goto err_out;

        tp->hw_status = pci_alloc_consistent(tp->pdev,
                                             TG3_HW_STATUS_SIZE,
                                             &tp->status_mapping);
        if (!tp->hw_status)
                goto err_out;

        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
                                            &tp->stats_mapping);
        if (!tp->hw_stats)
                goto err_out;

        /* The chip and driver both read these before first DMA write;
         * start from a known-clean state.
         */
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
4559
4560 #define MAX_WAIT_CNT 1000
4561
4562 /* To stop a block, clear the enable bit and poll till it
4563  * clears.  tp->lock is held.
4564  */
4565 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4566 {
4567         unsigned int i;
4568         u32 val;
4569
4570         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4571                 switch (ofs) {
4572                 case RCVLSC_MODE:
4573                 case DMAC_MODE:
4574                 case MBFREE_MODE:
4575                 case BUFMGR_MODE:
4576                 case MEMARB_MODE:
4577                         /* We can't enable/disable these bits of the
4578                          * 5705/5750, just say success.
4579                          */
4580                         return 0;
4581
4582                 default:
4583                         break;
4584                 };
4585         }
4586
4587         val = tr32(ofs);
4588         val &= ~enable_bit;
4589         tw32_f(ofs, val);
4590
4591         for (i = 0; i < MAX_WAIT_CNT; i++) {
4592                 udelay(100);
4593                 val = tr32(ofs);
4594                 if ((val & enable_bit) == 0)
4595                         break;
4596         }
4597
4598         if (i == MAX_WAIT_CNT && !silent) {
4599                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4600                        "ofs=%lx enable_bit=%x\n",
4601                        ofs, enable_bit);
4602                 return -ENODEV;
4603         }
4604
4605         return 0;
4606 }
4607
/* Stop all DMA engines and the MAC so the chip is fully quiescent.
 * Error codes from the individual blocks are OR-ed together, so a
 * non-zero return means at least one block refused to stop.
 * tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the MAC receiver first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Shut down the receive-path blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Shut down the transmit-path blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	/* Disable the MAC transmit DMA engine, then the transmitter itself. */
	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll up to MAX_WAIT_CNT * 100us (100ms) for TX to shut off. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing, write DMA and buffer manager support blocks. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset all FTQs (set then clear every reset bit). */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the status and statistics blocks so stale hardware state
	 * is not observed after the abort.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4670
4671 /* tp->lock is held. */
4672 static int tg3_nvram_lock(struct tg3 *tp)
4673 {
4674         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4675                 int i;
4676
4677                 if (tp->nvram_lock_cnt == 0) {
4678                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4679                         for (i = 0; i < 8000; i++) {
4680                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4681                                         break;
4682                                 udelay(20);
4683                         }
4684                         if (i == 8000) {
4685                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4686                                 return -ENODEV;
4687                         }
4688                 }
4689                 tp->nvram_lock_cnt++;
4690         }
4691         return 0;
4692 }
4693
4694 /* tp->lock is held. */
4695 static void tg3_nvram_unlock(struct tg3 *tp)
4696 {
4697         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4698                 if (tp->nvram_lock_cnt > 0)
4699                         tp->nvram_lock_cnt--;
4700                 if (tp->nvram_lock_cnt == 0)
4701                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4702         }
4703 }
4704
4705 /* tp->lock is held. */
4706 static void tg3_enable_nvram_access(struct tg3 *tp)
4707 {
4708         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4709             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4710                 u32 nvaccess = tr32(NVRAM_ACCESS);
4711
4712                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4713         }
4714 }
4715
4716 /* tp->lock is held. */
4717 static void tg3_disable_nvram_access(struct tg3 *tp)
4718 {
4719         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4720             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4721                 u32 nvaccess = tr32(NVRAM_ACCESS);
4722
4723                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4724         }
4725 }
4726
4727 /* tp->lock is held. */
4728 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4729 {
4730         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4731                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4732
4733         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4734                 switch (kind) {
4735                 case RESET_KIND_INIT:
4736                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4737                                       DRV_STATE_START);
4738                         break;
4739
4740                 case RESET_KIND_SHUTDOWN:
4741                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4742                                       DRV_STATE_UNLOAD);
4743                         break;
4744
4745                 case RESET_KIND_SUSPEND:
4746                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4747                                       DRV_STATE_SUSPEND);
4748                         break;
4749
4750                 default:
4751                         break;
4752                 };
4753         }
4754 }
4755
4756 /* tp->lock is held. */
4757 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4758 {
4759         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4760                 switch (kind) {
4761                 case RESET_KIND_INIT:
4762                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4763                                       DRV_STATE_START_DONE);
4764                         break;
4765
4766                 case RESET_KIND_SHUTDOWN:
4767                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4768                                       DRV_STATE_UNLOAD_DONE);
4769                         break;
4770
4771                 default:
4772                         break;
4773                 };
4774         }
4775 }
4776
4777 /* tp->lock is held. */
4778 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4779 {
4780         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4781                 switch (kind) {
4782                 case RESET_KIND_INIT:
4783                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4784                                       DRV_STATE_START);
4785                         break;
4786
4787                 case RESET_KIND_SHUTDOWN:
4788                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4789                                       DRV_STATE_UNLOAD);
4790                         break;
4791
4792                 case RESET_KIND_SUSPEND:
4793                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4794                                       DRV_STATE_SUSPEND);
4795                         break;
4796
4797                 default:
4798                         break;
4799                 };
4800         }
4801 }
4802
/* Wait for the chip's boot firmware to finish initializing.
 *
 * On the 5906 the internal VCPU status is polled directly (200 x
 * 100us = 20ms) and a timeout is fatal.  On all other chips the
 * firmware mailbox is polled (100000 x 10us = ~1s) for the inverted
 * magic value; a timeout there is NOT an error, because some boards
 * (e.g. Sun onboard parts) legitimately ship without firmware.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  The firmware
	 * writes back the one's complement of the magic value when done.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
4841
4842 /* Save PCI command register before chip reset */
4843 static void tg3_save_pci_state(struct tg3 *tp)
4844 {
4845         u32 val;
4846
4847         pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
4848         tp->pci_cmd = val;
4849 }
4850
/* Restore PCI state after chip reset.  Re-enables indirect register
 * access, re-applies the PCI command register saved by
 * tg3_save_pci_state(), and fixes up PCI-X/MSI settings the
 * core-clock reset clobbers.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	/* 5704 A0 in PCI-X mode additionally needs the retry-same-DMA
	 * workaround bit.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the saved PCI command register (memory enable etc.). */
	pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Also re-enable MSI mode in the chip itself. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
4899
4900 static void tg3_stop_fw(struct tg3 *);
4901
/* Perform a core-clock reset of the chip via GRC_MISC_CFG, then
 * restore enough PCI/MAC state that the device can be reprogrammed.
 * Returns 0 on success or a negative errno if the boot firmware
 * failed to come up (see tg3_poll_fw()).  tp->lock is held.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIE workaround registers/bits carried over from vendor
		 * code -- presumably reset sequencing fixups; do not change.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Flag a driver-initiated reset to the 5906 VCPU and
		 * release it from halt so it runs after the reset.
		 */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	/* This write triggers the actual core-clock reset. */
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			/* NOTE(review): config offset 0xc4 / bit 15 is an
			 * undocumented 5750 A0 workaround -- do not change.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* PCI registers are usable again; allow the irq handler back in. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		/* NOTE(review): register 0x5000 value 0x400 is an
		 * undocumented 5750 A3 workaround -- do not change.
		 */
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		/* NOTE(review): undocumented 5705 A0 fixup (0xc4, bit 15). */
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore a MAC port mode matching the attached PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for boot firmware to come back up after the reset. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		/* NOTE(review): undocumented PCIE fixup (0x7c00, bit 25). */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5088
5089 /* tp->lock is held. */
5090 static void tg3_stop_fw(struct tg3 *tp)
5091 {
5092         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5093                 u32 val;
5094                 int i;
5095
5096                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5097                 val = tr32(GRC_RX_CPU_EVENT);
5098                 val |= (1 << 14);
5099                 tw32(GRC_RX_CPU_EVENT, val);
5100
5101                 /* Wait for RX cpu to ACK the event.  */
5102                 for (i = 0; i < 100; i++) {
5103                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5104                                 break;
5105                         udelay(1);
5106                 }
5107         }
5108 }
5109
/* Fully halt the chip: pause firmware, signal the reset kind, stop
 * all hardware blocks, and reset the core.  Returns the result of
 * tg3_chip_reset().  tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Signatures are posted even when the reset failed. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5130
5131 #define TG3_FW_RELEASE_MAJOR    0x0
5132 #define TG3_FW_RELASE_MINOR     0x0
5133 #define TG3_FW_RELEASE_FIX      0x0
5134 #define TG3_FW_START_ADDR       0x08000000
5135 #define TG3_FW_TEXT_ADDR        0x08000000
5136 #define TG3_FW_TEXT_LEN         0x9c0
5137 #define TG3_FW_RODATA_ADDR      0x080009c0
5138 #define TG3_FW_RODATA_LEN       0x60
5139 #define TG3_FW_DATA_ADDR        0x08000a40
5140 #define TG3_FW_DATA_LEN         0x20
5141 #define TG3_FW_SBSS_ADDR        0x08000a60
5142 #define TG3_FW_SBSS_LEN         0xc
5143 #define TG3_FW_BSS_ADDR         0x08000a70
5144 #define TG3_FW_BSS_LEN          0x10
5145
/* Opaque MIPS firmware image (.text section) for the chip's embedded
 * CPU, loaded by tg3_load_firmware_cpu().  Derived from proprietary
 * Broadcom source (see the copyright header of this file) -- machine
 * generated, do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5239
/* Read-only data section for the firmware image above.  The words
 * appear to be packed ASCII tags (e.g. 0x35373031 = "5701") used by
 * the firmware -- machine generated, do not edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5247
5248 #if 0 /* All zeros, don't eat up space with it. */
5249 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5250         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5251         0x00000000, 0x00000000, 0x00000000, 0x00000000
5252 };
5253 #endif
5254
5255 #define RX_CPU_SCRATCH_BASE     0x30000
5256 #define RX_CPU_SCRATCH_SIZE     0x04000
5257 #define TX_CPU_SCRATCH_BASE     0x34000
5258 #define TX_CPU_SCRATCH_SIZE     0x04000
5259
5260 /* tp->lock is held. */
5261 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5262 {
5263         int i;
5264
5265         BUG_ON(offset == TX_CPU_BASE &&
5266             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5267
5268         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5269                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5270
5271                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5272                 return 0;
5273         }
5274         if (offset == RX_CPU_BASE) {
5275                 for (i = 0; i < 10000; i++) {
5276                         tw32(offset + CPU_STATE, 0xffffffff);
5277                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5278                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5279                                 break;
5280                 }
5281
5282                 tw32(offset + CPU_STATE, 0xffffffff);
5283                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5284                 udelay(10);
5285         } else {
5286                 for (i = 0; i < 10000; i++) {
5287                         tw32(offset + CPU_STATE, 0xffffffff);
5288                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5289                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5290                                 break;
5291                 }
5292         }
5293
5294         if (i >= 10000) {
5295                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5296                        "and %s CPU\n",
5297                        tp->dev->name,
5298                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5299                 return -ENODEV;
5300         }
5301
5302         /* Clear firmware's nvram arbitration. */
5303         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5304                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5305         return 0;
5306 }
5307
/* Location and contents of a cpu firmware image, consumed by
 * tg3_load_firmware_cpu().  A NULL section data pointer means the
 * section is written as all zeros (see the text_data use there).
 */
struct fw_info {
	unsigned int text_base;		/* target address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* target address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* target address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5319
5320 /* tp->lock is held. */
5321 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5322                                  int cpu_scratch_size, struct fw_info *info)
5323 {
5324         int err, lock_err, i;
5325         void (*write_op)(struct tg3 *, u32, u32);
5326
5327         if (cpu_base == TX_CPU_BASE &&
5328             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5329                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5330                        "TX cpu firmware on %s which is 5705.\n",
5331                        tp->dev->name);
5332                 return -EINVAL;
5333         }
5334
5335         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5336                 write_op = tg3_write_mem;
5337         else
5338                 write_op = tg3_write_indirect_reg32;
5339
5340         /* It is possible that bootcode is still loading at this point.
5341          * Get the nvram lock first before halting the cpu.
5342          */
5343         lock_err = tg3_nvram_lock(tp);
5344         err = tg3_halt_cpu(tp, cpu_base);
5345         if (!lock_err)
5346                 tg3_nvram_unlock(tp);
5347         if (err)
5348                 goto out;
5349
5350         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5351                 write_op(tp, cpu_scratch_base + i, 0);
5352         tw32(cpu_base + CPU_STATE, 0xffffffff);
5353         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5354         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5355                 write_op(tp, (cpu_scratch_base +
5356                               (info->text_base & 0xffff) +
5357                               (i * sizeof(u32))),
5358                          (info->text_data ?
5359                           info->text_data[i] : 0));
5360         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5361                 write_op(tp, (cpu_scratch_base +
5362                               (info->rodata_base & 0xffff) +
5363                               (i * sizeof(u32))),
5364                          (info->rodata_data ?
5365                           info->rodata_data[i] : 0));
5366         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5367                 write_op(tp, (cpu_scratch_base +
5368                               (info->data_base & 0xffff) +
5369                               (i * sizeof(u32))),
5370                          (info->data_data ?
5371                           info->data_data[i] : 0));
5372
5373         err = 0;
5374
5375 out:
5376         return err;
5377 }
5378
5379 /* tp->lock is held. */
5380 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5381 {
5382         struct fw_info info;
5383         int err, i;
5384
5385         info.text_base = TG3_FW_TEXT_ADDR;
5386         info.text_len = TG3_FW_TEXT_LEN;
5387         info.text_data = &tg3FwText[0];
5388         info.rodata_base = TG3_FW_RODATA_ADDR;
5389         info.rodata_len = TG3_FW_RODATA_LEN;
5390         info.rodata_data = &tg3FwRodata[0];
5391         info.data_base = TG3_FW_DATA_ADDR;
5392         info.data_len = TG3_FW_DATA_LEN;
5393         info.data_data = NULL;
5394
5395         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5396                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5397                                     &info);
5398         if (err)
5399                 return err;
5400
5401         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5402                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5403                                     &info);
5404         if (err)
5405                 return err;
5406
5407         /* Now startup only the RX cpu. */
5408         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5409         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5410
5411         for (i = 0; i < 5; i++) {
5412                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5413                         break;
5414                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5415                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5416                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5417                 udelay(1000);
5418         }
5419         if (i >= 5) {
5420                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5421                        "to set RX CPU PC, is %08x should be %08x\n",
5422                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5423                        TG3_FW_TEXT_ADDR);
5424                 return -ENODEV;
5425         }
5426         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5427         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5428
5429         return 0;
5430 }
5431
5432
/* TSO firmware image layout, release 1.6.0 (the version string also
 * appears in tg3TsoFwData below).  The *_ADDR/*_LEN macros give the
 * device-CPU load address and byte length of each firmware section
 * (text, rodata, data, sbss, bss).
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6     /* NOTE(review): "RELASE" typo; kept to avoid breaking references elsewhere */
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
5447
/* TSO firmware text (instruction) image, TG3_TSO_FW_TEXT_LEN bytes.
 * Machine-generated data -- do not hand-edit.
 */
static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
	0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
	0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
	0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
	0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
	0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
	0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
	0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
	0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
	0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
	0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
	0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
	0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
	0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
	0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
	0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
	0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
	0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
	0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
	0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
	0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
	0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
	0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
	0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
	0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
	0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
	0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
	0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
	0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
	0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
	0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
	0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
	0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
	0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
	0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
	0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
	0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
	0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
	0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
	0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
	0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
	0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
	0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
	0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
	0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
	0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
	0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
	0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
	0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
	0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
	0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
	0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
	0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
	0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
	0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
	0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
	0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
	0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
	0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
	0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
	0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
	0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
	0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
	0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
	0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
	0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
	0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
	0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
	0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
	0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
	0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
	0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
	0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
	0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
	0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
	0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
	0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
	0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
	0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
	0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
	0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
	0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
	0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
	0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
	0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
	0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
	0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
	0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
	0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
	0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
	0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
	0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
	0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
	0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
	0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
	0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
	0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
	0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
	0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
	0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
	0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
	0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
	0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
	0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
	0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
	0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
	0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
	0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
	0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
	0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
	0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
	0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
	0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
	0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
	0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
	0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
	0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
	0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
	0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
	0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
	0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
	0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
	0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
	0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
	0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
	0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
	0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
	0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
	0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
	0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
	0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
	0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
	0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
	0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
	0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
	0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
	0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
	0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
	0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
	0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
	0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
	0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
	0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
	0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
	0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
	0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
	0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
	0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
	0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
	0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
	0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
	0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
	0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
	0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
	0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
	0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
	0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
	0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
	0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
	0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
	0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
	0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
	0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
	0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
	0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
	0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
	0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
	0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
	0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
	0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
	0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
	0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
	0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
	0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
	0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
	0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
	0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
	0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
	0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
	0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
	0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
	0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
	0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
	0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
	0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
	0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
	0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
	0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
	0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
	0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
	0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
	0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
	0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
	0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
	0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
	0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
	0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
	0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
	0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
	0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
	0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
	0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
	0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
	0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
	0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
	0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
	0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
	0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
	0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
	0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
	0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
	0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
	0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
	0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
	0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
	0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
	0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
	0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
	0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
	0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
	0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
	0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
	0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
	0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
	0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
	0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
	0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
	0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
	0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
	0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
	0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
	0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
	0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
	0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
	0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
	0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
	0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
	0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
	0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
	0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
	0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
	0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
	0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
	0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
	0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
	0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
	0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
	0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
	0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};
5734
/* TSO firmware rodata section, TG3_TSO_FW_RODATA_LEN bytes.  The words
 * are ASCII-packed strings used by the firmware (e.g. "MainCpuB",
 * "MainCpuA", "stkoffldIn", "SwEvent0", "fatalErr").  Machine-generated
 * data -- do not hand-edit.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5742
/* TSO firmware initialized-data section, TG3_TSO_FW_DATA_LEN bytes.
 * Contains the ASCII-packed version string "stkoffld_v1.6.0", matching
 * the TG3_TSO_FW_RELEASE_* macros.  Machine-generated -- do not hand-edit.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5748
/* 5705 needs a special version of the TSO firmware.  */
/* 5705 TSO firmware image layout, release 1.2.0.  As for the regular
 * TSO firmware above, the *_ADDR/*_LEN macros give the device-CPU load
 * address and byte length of each firmware section.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2     /* NOTE(review): "RELASE" typo; kept to avoid breaking references elsewhere */
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
5764
/* Text (code) segment of the 5705-specific TSO firmware: raw machine
 * code for the chip's on-board CPU, loaded at TG3_TSO5_FW_TEXT_ADDR by
 * tg3_load_tso_firmware().  Opaque binary data — do not edit by hand.
 * The "+ 1" pads the array so a trailing partial word is representable.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
        0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
        0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
        0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
        0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
        0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
        0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
        0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
        0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
        0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
        0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
        0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
        0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
        0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
        0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
        0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
        0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
        0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
        0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
        0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
        0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
        0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
        0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
        0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
        0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
        0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
        0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
        0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
        0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
        0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
        0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
        0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
        0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
        0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
        0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
        0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
        0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
        0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
        0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
        0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
        0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
        0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
        0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
        0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
        0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
        0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
        0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
        0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
        0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
        0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
        0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
        0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
        0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
        0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
        0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
        0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
        0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
        0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
        0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
        0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
        0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
        0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
        0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
        0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
        0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
        0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
        0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
        0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
        0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
        0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
        0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
        0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
        0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
        0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
        0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
        0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
        0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
        0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
        0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
        0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
        0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
        0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
        0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
        0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
        0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
        0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
        0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
        0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
        0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
        0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
        0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
        0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
        0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
        0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
        0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
        0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
        0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
        0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
        0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
        0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
        0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
        0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
        0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
        0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
        0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
        0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
        0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
        0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
        0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
        0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
        0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
        0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
        0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
        0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
        0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
        0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
        0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
        0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
        0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
        0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
        0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
        0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
        0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
        0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
        0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
        0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
        0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
        0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
        0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
        0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
        0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0x00000000, 0x00000000, 0x00000000,
};
5923
/* Read-only data segment of the 5705 TSO firmware.  The words are ASCII
 * bytes: the strings "MainCpuB", "MainCpuA", "stkoffld" (twice) and
 * "fatalErr", each NUL-padded.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
5930
/* Initialized data segment of the 5705 TSO firmware; the embedded ASCII
 * bytes form the version tag "stkoffld_v1.2.0".
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
5935
/* tp->lock is held.
 *
 * Download the TSO firmware (text, rodata and data segments) into the
 * chip's scratch memory and start the on-board CPU executing it at the
 * image's text base.  Returns 0 on success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        struct fw_info info;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;

        /* Chips that do TSO in hardware need no firmware download. */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                return 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                /* 5705 runs a special TSO image on the RX CPU; its scratch
                 * area is carved out of the NIC SRAM mbuf pool, sized to
                 * hold all segments plus the (zero-filled) sbss/bss.
                 */
                info.text_base = TG3_TSO5_FW_TEXT_ADDR;
                info.text_len = TG3_TSO5_FW_TEXT_LEN;
                info.text_data = &tg3Tso5FwText[0];
                info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
                info.rodata_data = &tg3Tso5FwRodata[0];
                info.data_base = TG3_TSO5_FW_DATA_ADDR;
                info.data_len = TG3_TSO5_FW_DATA_LEN;
                info.data_data = &tg3Tso5FwData[0];
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
                cpu_scratch_size = (info.text_len +
                                    info.rodata_len +
                                    info.data_len +
                                    TG3_TSO5_FW_SBSS_LEN +
                                    TG3_TSO5_FW_BSS_LEN);
        } else {
                /* All other TSO-capable chips run the generic image on the
                 * TX CPU using its dedicated scratch area.
                 */
                info.text_base = TG3_TSO_FW_TEXT_ADDR;
                info.text_len = TG3_TSO_FW_TEXT_LEN;
                info.text_data = &tg3TsoFwText[0];
                info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO_FW_RODATA_LEN;
                info.rodata_data = &tg3TsoFwRodata[0];
                info.data_base = TG3_TSO_FW_DATA_ADDR;
                info.data_len = TG3_TSO_FW_DATA_LEN;
                info.data_data = &tg3TsoFwData[0];
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    &info);
        if (err)
                return err;

        /* Now startup the cpu. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC,    info.text_base);

        /* Retry a few times: re-halt the CPU and rewrite the program
         * counter until it reads back as the firmware entry point.
         */
        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == info.text_base)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC,    info.text_base);
                udelay(1000);
        }
        if (i >= 5) {
                printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
                       "to set CPU PC, is %08x should be %08x\n",
                       tp->dev->name, tr32(cpu_base + CPU_PC),
                       info.text_base);
                return -ENODEV;
        }
        /* Clear the halt bit to let the CPU run the firmware. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
        return 0;
}
6007
6008
6009 /* tp->lock is held. */
6010 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6011 {
6012         u32 addr_high, addr_low;
6013         int i;
6014
6015         addr_high = ((tp->dev->dev_addr[0] << 8) |
6016                      tp->dev->dev_addr[1]);
6017         addr_low = ((tp->dev->dev_addr[2] << 24) |
6018                     (tp->dev->dev_addr[3] << 16) |
6019                     (tp->dev->dev_addr[4] <<  8) |
6020                     (tp->dev->dev_addr[5] <<  0));
6021         for (i = 0; i < 4; i++) {
6022                 if (i == 1 && skip_mac_1)
6023                         continue;
6024                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6025                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6026         }
6027
6028         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6029             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6030                 for (i = 0; i < 12; i++) {
6031                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6032                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6033                 }
6034         }
6035
6036         addr_high = (tp->dev->dev_addr[0] +
6037                      tp->dev->dev_addr[1] +
6038                      tp->dev->dev_addr[2] +
6039                      tp->dev->dev_addr[3] +
6040                      tp->dev->dev_addr[4] +
6041                      tp->dev->dev_addr[5]) &
6042                 TX_BACKOFF_SEED_MASK;
6043         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6044 }
6045
6046 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6047 {
6048         struct tg3 *tp = netdev_priv(dev);
6049         struct sockaddr *addr = p;
6050         int err = 0, skip_mac_1 = 0;
6051
6052         if (!is_valid_ether_addr(addr->sa_data))
6053                 return -EINVAL;
6054
6055         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6056
6057         if (!netif_running(dev))
6058                 return 0;
6059
6060         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6061                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6062
6063                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6064                 addr0_low = tr32(MAC_ADDR_0_LOW);
6065                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6066                 addr1_low = tr32(MAC_ADDR_1_LOW);
6067
6068                 /* Skip MAC addr 1 if ASF is using it. */
6069                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6070                     !(addr1_high == 0 && addr1_low == 0))
6071                         skip_mac_1 = 1;
6072         }
6073         spin_lock_bh(&tp->lock);
6074         __tg3_set_mac_addr(tp, skip_mac_1);
6075         spin_unlock_bh(&tp->lock);
6076
6077         return err;
6078 }
6079
6080 /* tp->lock is held. */
6081 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6082                            dma_addr_t mapping, u32 maxlen_flags,
6083                            u32 nic_addr)
6084 {
6085         tg3_write_mem(tp,
6086                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6087                       ((u64) mapping >> 32));
6088         tg3_write_mem(tp,
6089                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6090                       ((u64) mapping & 0xffffffff));
6091         tg3_write_mem(tp,
6092                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6093                        maxlen_flags);
6094
6095         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6096                 tg3_write_mem(tp,
6097                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6098                               nic_addr);
6099 }
6100
6101 static void __tg3_set_rx_mode(struct net_device *);
6102 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6103 {
6104         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6105         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6106         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6107         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6108         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6109                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6110                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6111         }
6112         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6113         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6114         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6115                 u32 val = ec->stats_block_coalesce_usecs;
6116
6117                 if (!netif_carrier_ok(tp->dev))
6118                         val = 0;
6119
6120                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6121         }
6122 }
6123
6124 /* tp->lock is held. */
6125 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6126 {
6127         u32 val, rdmac_mode;
6128         int i, err, limit;
6129
6130         tg3_disable_ints(tp);
6131
6132         tg3_stop_fw(tp);
6133
6134         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6135
6136         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6137                 tg3_abort_hw(tp, 1);
6138         }
6139
6140         if (reset_phy)
6141                 tg3_phy_reset(tp);
6142
6143         err = tg3_chip_reset(tp);
6144         if (err)
6145                 return err;
6146
6147         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6148
6149         /* This works around an issue with Athlon chipsets on
6150          * B3 tigon3 silicon.  This bit has no effect on any
6151          * other revision.  But do not set this on PCI Express
6152          * chips and don't even touch the clocks if the CPMU is present.
6153          */
6154         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6155                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6156                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6157                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6158         }
6159
6160         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6161             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6162                 val = tr32(TG3PCI_PCISTATE);
6163                 val |= PCISTATE_RETRY_SAME_DMA;
6164                 tw32(TG3PCI_PCISTATE, val);
6165         }
6166
6167         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6168                 /* Enable some hw fixes.  */
6169                 val = tr32(TG3PCI_MSI_DATA);
6170                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6171                 tw32(TG3PCI_MSI_DATA, val);
6172         }
6173
6174         /* Descriptor ring init may make accesses to the
6175          * NIC SRAM area to setup the TX descriptors, so we
6176          * can only do this after the hardware has been
6177          * successfully reset.
6178          */
6179         err = tg3_init_rings(tp);
6180         if (err)
6181                 return err;
6182
6183         /* This value is determined during the probe time DMA
6184          * engine test, tg3_test_dma.
6185          */
6186         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6187
6188         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6189                           GRC_MODE_4X_NIC_SEND_RINGS |
6190                           GRC_MODE_NO_TX_PHDR_CSUM |
6191                           GRC_MODE_NO_RX_PHDR_CSUM);
6192         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6193
6194         /* Pseudo-header checksum is done by hardware logic and not
6195          * the offload processers, so make the chip do the pseudo-
6196          * header checksums on receive.  For transmit it is more
6197          * convenient to do the pseudo-header checksum in software
6198          * as Linux does that on transmit for us in all cases.
6199          */
6200         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6201
6202         tw32(GRC_MODE,
6203              tp->grc_mode |
6204              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6205
6206         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6207         val = tr32(GRC_MISC_CFG);
6208         val &= ~0xff;
6209         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6210         tw32(GRC_MISC_CFG, val);
6211
6212         /* Initialize MBUF/DESC pool. */
6213         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6214                 /* Do nothing.  */
6215         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6216                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6217                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6218                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6219                 else
6220                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6221                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6222                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6223         }
6224         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6225                 int fw_len;
6226
6227                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6228                           TG3_TSO5_FW_RODATA_LEN +
6229                           TG3_TSO5_FW_DATA_LEN +
6230                           TG3_TSO5_FW_SBSS_LEN +
6231                           TG3_TSO5_FW_BSS_LEN);
6232                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6233                 tw32(BUFMGR_MB_POOL_ADDR,
6234                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6235                 tw32(BUFMGR_MB_POOL_SIZE,
6236                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6237         }
6238
6239         if (tp->dev->mtu <= ETH_DATA_LEN) {
6240                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6241                      tp->bufmgr_config.mbuf_read_dma_low_water);
6242                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6243                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6244                 tw32(BUFMGR_MB_HIGH_WATER,
6245                      tp->bufmgr_config.mbuf_high_water);
6246         } else {
6247                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6248                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6249                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6250                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6251                 tw32(BUFMGR_MB_HIGH_WATER,
6252                      tp->bufmgr_config.mbuf_high_water_jumbo);
6253         }
6254         tw32(BUFMGR_DMA_LOW_WATER,
6255              tp->bufmgr_config.dma_low_water);
6256         tw32(BUFMGR_DMA_HIGH_WATER,
6257              tp->bufmgr_config.dma_high_water);
6258
6259         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6260         for (i = 0; i < 2000; i++) {
6261                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6262                         break;
6263                 udelay(10);
6264         }
6265         if (i >= 2000) {
6266                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6267                        tp->dev->name);
6268                 return -ENODEV;
6269         }
6270
6271         /* Setup replenish threshold. */
6272         val = tp->rx_pending / 8;
6273         if (val == 0)
6274                 val = 1;
6275         else if (val > tp->rx_std_max_post)
6276                 val = tp->rx_std_max_post;
6277         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6278                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6279                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6280
6281                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6282                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6283         }
6284
6285         tw32(RCVBDI_STD_THRESH, val);
6286
6287         /* Initialize TG3_BDINFO's at:
6288          *  RCVDBDI_STD_BD:     standard eth size rx ring
6289          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6290          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6291          *
6292          * like so:
6293          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6294          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6295          *                              ring attribute flags
6296          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6297          *
6298          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6299          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6300          *
6301          * The size of each ring is fixed in the firmware, but the location is
6302          * configurable.
6303          */
6304         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6305              ((u64) tp->rx_std_mapping >> 32));
6306         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6307              ((u64) tp->rx_std_mapping & 0xffffffff));
6308         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6309              NIC_SRAM_RX_BUFFER_DESC);
6310
6311         /* Don't even try to program the JUMBO/MINI buffer descriptor
6312          * configs on 5705.
6313          */
6314         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6315                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6316                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6317         } else {
6318                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6319                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6320
6321                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6322                      BDINFO_FLAGS_DISABLED);
6323
6324                 /* Setup replenish threshold. */
6325                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6326
6327                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6328                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6329                              ((u64) tp->rx_jumbo_mapping >> 32));
6330                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6331                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6332                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6333                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6334                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6335                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6336                 } else {
6337                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6338                              BDINFO_FLAGS_DISABLED);
6339                 }
6340
6341         }
6342
6343         /* There is only one send ring on 5705/5750, no need to explicitly
6344          * disable the others.
6345          */
6346         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6347                 /* Clear out send RCB ring in SRAM. */
6348                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6349                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6350                                       BDINFO_FLAGS_DISABLED);
6351         }
6352
6353         tp->tx_prod = 0;
6354         tp->tx_cons = 0;
6355         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6356         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6357
6358         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6359                        tp->tx_desc_mapping,
6360                        (TG3_TX_RING_SIZE <<
6361                         BDINFO_FLAGS_MAXLEN_SHIFT),
6362                        NIC_SRAM_TX_BUFFER_DESC);
6363
6364         /* There is only one receive return ring on 5705/5750, no need
6365          * to explicitly disable the others.
6366          */
6367         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6368                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6369                      i += TG3_BDINFO_SIZE) {
6370                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6371                                       BDINFO_FLAGS_DISABLED);
6372                 }
6373         }
6374
6375         tp->rx_rcb_ptr = 0;
6376         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6377
6378         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6379                        tp->rx_rcb_mapping,
6380                        (TG3_RX_RCB_RING_SIZE(tp) <<
6381                         BDINFO_FLAGS_MAXLEN_SHIFT),
6382                        0);
6383
6384         tp->rx_std_ptr = tp->rx_pending;
6385         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6386                      tp->rx_std_ptr);
6387
6388         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6389                                                 tp->rx_jumbo_pending : 0;
6390         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6391                      tp->rx_jumbo_ptr);
6392
6393         /* Initialize MAC address and backoff seed. */
6394         __tg3_set_mac_addr(tp, 0);
6395
6396         /* MTU + ethernet header + FCS + optional VLAN tag */
6397         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6398
6399         /* The slot time is changed by tg3_setup_phy if we
6400          * run at gigabit with half duplex.
6401          */
6402         tw32(MAC_TX_LENGTHS,
6403              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6404              (6 << TX_LENGTHS_IPG_SHIFT) |
6405              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6406
6407         /* Receive rules. */
6408         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6409         tw32(RCVLPC_CONFIG, 0x0181);
6410
6411         /* Calculate RDMAC_MODE setting early, we need it to determine
6412          * the RCVLPC_STATE_ENABLE mask.
6413          */
6414         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6415                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6416                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6417                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6418                       RDMAC_MODE_LNGREAD_ENAB);
6419
6420         /* If statement applies to 5705 and 5750 PCI devices only */
6421         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6422              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6423             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6424                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6425                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6426                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6427                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6428                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6429                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6430                 }
6431         }
6432
6433         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6434                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6435
6436         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6437                 rdmac_mode |= (1 << 27);
6438
6439         /* Receive/send statistics. */
6440         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6441                 val = tr32(RCVLPC_STATS_ENABLE);
6442                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6443                 tw32(RCVLPC_STATS_ENABLE, val);
6444         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6445                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6446                 val = tr32(RCVLPC_STATS_ENABLE);
6447                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6448                 tw32(RCVLPC_STATS_ENABLE, val);
6449         } else {
6450                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6451         }
6452         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6453         tw32(SNDDATAI_STATSENAB, 0xffffff);
6454         tw32(SNDDATAI_STATSCTRL,
6455              (SNDDATAI_SCTRL_ENABLE |
6456               SNDDATAI_SCTRL_FASTUPD));
6457
6458         /* Setup host coalescing engine. */
6459         tw32(HOSTCC_MODE, 0);
6460         for (i = 0; i < 2000; i++) {
6461                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6462                         break;
6463                 udelay(10);
6464         }
6465
6466         __tg3_set_coalesce(tp, &tp->coal);
6467
6468         /* set status block DMA address */
6469         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6470              ((u64) tp->status_mapping >> 32));
6471         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6472              ((u64) tp->status_mapping & 0xffffffff));
6473
6474         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6475                 /* Status/statistics block address.  See tg3_timer,
6476                  * the tg3_periodic_fetch_stats call there, and
6477                  * tg3_get_stats to see how this works for 5705/5750 chips.
6478                  */
6479                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6480                      ((u64) tp->stats_mapping >> 32));
6481                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6482                      ((u64) tp->stats_mapping & 0xffffffff));
6483                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6484                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6485         }
6486
6487         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6488
6489         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6490         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6491         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6492                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6493
6494         /* Clear statistics/status block in chip, and status block in ram. */
6495         for (i = NIC_SRAM_STATS_BLK;
6496              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6497              i += sizeof(u32)) {
6498                 tg3_write_mem(tp, i, 0);
6499                 udelay(40);
6500         }
6501         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6502
6503         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6504                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6505                 /* reset to prevent losing 1st rx packet intermittently */
6506                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6507                 udelay(10);
6508         }
6509
6510         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6511                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6512         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6513             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6514             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6515                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6516         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6517         udelay(40);
6518
6519         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6520          * If TG3_FLG2_IS_NIC is zero, we should read the
6521          * register to preserve the GPIO settings for LOMs. The GPIOs,
6522          * whether used as inputs or outputs, are set by boot code after
6523          * reset.
6524          */
6525         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6526                 u32 gpio_mask;
6527
6528                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6529                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6530                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6531
6532                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6533                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6534                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6535
6536                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6537                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6538
6539                 tp->grc_local_ctrl &= ~gpio_mask;
6540                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6541
6542                 /* GPIO1 must be driven high for eeprom write protect */
6543                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6544                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6545                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6546         }
6547         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6548         udelay(100);
6549
6550         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6551         tp->last_tag = 0;
6552
6553         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6554                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6555                 udelay(40);
6556         }
6557
6558         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6559                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6560                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6561                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6562                WDMAC_MODE_LNGREAD_ENAB);
6563
6564         /* If statement applies to 5705 and 5750 PCI devices only */
6565         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6566              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6567             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6568                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6569                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6570                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6571                         /* nothing */
6572                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6573                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6574                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6575                         val |= WDMAC_MODE_RX_ACCEL;
6576                 }
6577         }
6578
6579         /* Enable host coalescing bug fix */
6580         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6581             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6582                 val |= (1 << 29);
6583
6584         tw32_f(WDMAC_MODE, val);
6585         udelay(40);
6586
6587         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6588                 u16 pcix_cmd;
6589
6590                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6591                                      &pcix_cmd);
6592                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6593                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6594                         pcix_cmd |= PCI_X_CMD_READ_2K;
6595                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6596                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6597                         pcix_cmd |= PCI_X_CMD_READ_2K;
6598                 }
6599                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6600                                       pcix_cmd);
6601         }
6602
6603         tw32_f(RDMAC_MODE, rdmac_mode);
6604         udelay(40);
6605
6606         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6607         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6608                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6609         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6610         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6611         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6612         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6613         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6614         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6615                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6616         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6617         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6618
6619         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6620                 err = tg3_load_5701_a0_firmware_fix(tp);
6621                 if (err)
6622                         return err;
6623         }
6624
6625         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6626                 err = tg3_load_tso_firmware(tp);
6627                 if (err)
6628                         return err;
6629         }
6630
6631         tp->tx_mode = TX_MODE_ENABLE;
6632         tw32_f(MAC_TX_MODE, tp->tx_mode);
6633         udelay(100);
6634
6635         tp->rx_mode = RX_MODE_ENABLE;
6636         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6637                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6638
6639         tw32_f(MAC_RX_MODE, tp->rx_mode);
6640         udelay(10);
6641
6642         if (tp->link_config.phy_is_low_power) {
6643                 tp->link_config.phy_is_low_power = 0;
6644                 tp->link_config.speed = tp->link_config.orig_speed;
6645                 tp->link_config.duplex = tp->link_config.orig_duplex;
6646                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6647         }
6648
6649         tp->mi_mode = MAC_MI_MODE_BASE;
6650         tw32_f(MAC_MI_MODE, tp->mi_mode);
6651         udelay(80);
6652
6653         tw32(MAC_LED_CTRL, tp->led_ctrl);
6654
6655         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6656         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6657                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6658                 udelay(10);
6659         }
6660         tw32_f(MAC_RX_MODE, tp->rx_mode);
6661         udelay(10);
6662
6663         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6664                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6665                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6666                         /* Set drive transmission level to 1.2V  */
6667                         /* only if the signal pre-emphasis bit is not set  */
6668                         val = tr32(MAC_SERDES_CFG);
6669                         val &= 0xfffff000;
6670                         val |= 0x880;
6671                         tw32(MAC_SERDES_CFG, val);
6672                 }
6673                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6674                         tw32(MAC_SERDES_CFG, 0x616000);
6675         }
6676
6677         /* Prevent chip from dropping frames when flow control
6678          * is enabled.
6679          */
6680         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6681
6682         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6683             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6684                 /* Use hardware link auto-negotiation */
6685                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6686         }
6687
6688         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6689             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6690                 u32 tmp;
6691
6692                 tmp = tr32(SERDES_RX_CTRL);
6693                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6694                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6695                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6696                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6697         }
6698
6699         err = tg3_setup_phy(tp, 0);
6700         if (err)
6701                 return err;
6702
6703         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6704             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6705                 u32 tmp;
6706
6707                 /* Clear CRC stats. */
6708                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6709                         tg3_writephy(tp, MII_TG3_TEST1,
6710                                      tmp | MII_TG3_TEST1_CRC_EN);
6711                         tg3_readphy(tp, 0x14, &tmp);
6712                 }
6713         }
6714
6715         __tg3_set_rx_mode(tp->dev);
6716
6717         /* Initialize receive rules. */
6718         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6719         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6720         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6721         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6722
6723         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6724             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6725                 limit = 8;
6726         else
6727                 limit = 16;
6728         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6729                 limit -= 4;
6730         switch (limit) {
6731         case 16:
6732                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6733         case 15:
6734                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6735         case 14:
6736                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6737         case 13:
6738                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6739         case 12:
6740                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6741         case 11:
6742                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6743         case 10:
6744                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6745         case 9:
6746                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6747         case 8:
6748                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6749         case 7:
6750                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6751         case 6:
6752                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6753         case 5:
6754                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6755         case 4:
6756                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6757         case 3:
6758                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6759         case 2:
6760         case 1:
6761
6762         default:
6763                 break;
6764         };
6765
6766         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6767
6768         return 0;
6769 }
6770
6771 /* Called at device open time to get the chip ready for
6772  * packet processing.  Invoked with tp->lock held.
6773  */
6774 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6775 {
6776         int err;
6777
6778         /* Force the chip into D0. */
6779         err = tg3_set_power_state(tp, PCI_D0);
6780         if (err)
6781                 goto out;
6782
6783         tg3_switch_clocks(tp);
6784
6785         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6786
6787         err = tg3_reset_hw(tp, reset_phy);
6788
6789 out:
6790         return err;
6791 }
6792
/* Read the 32-bit statistics register REG and accumulate it into the
 * 64-bit counter pointed to by PSTAT, which is split into 32-bit
 * ->low / ->high halves.  The unsigned comparison after the addition
 * detects wraparound of ->low and propagates the carry into ->high.
 * NOTE(review): this assumes the hardware counters are delta-style
 * (e.g. clear-on-read) rather than free-running totals — confirm
 * against the register spec before reusing elsewhere.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
6799
/* Fold the chip's 32-bit MAC and receive-list-placement statistics
 * registers into the 64-bit counters in tp->hw_stats, using
 * TG3_STAT_ADD32 for carry-correct accumulation.  Called once per
 * second from tg3_timer() on 5705-plus chips; see the HOSTCC setup in
 * tg3_reset_hw(), where only pre-5705 chips are given a statistics
 * block DMA address — for the rest the host fetches counters itself.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        /* Skip the register reads entirely while the link is down. */
        if (!netif_carrier_ok(tp->dev))
                return;

        /* Transmit MAC statistics. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* Receive MAC statistics. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        /* Receive list placement statistics. */
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6840
/* Driver watchdog timer, unconditionally re-armed at the end of every
 * run so it fires each tp->timer_offset jiffies.  Responsibilities:
 *  - when tagged interrupt status is NOT in use, kick the chip past
 *    the racy mailbox/status-block protocol each tick;
 *  - detect a stalled write DMA engine and schedule a chip reset;
 *  - once per second (timer_counter countdown): fetch statistics on
 *    5705+ chips and poll/react to link state changes;
 *  - every asf_multiplier ticks: send the ASF firmware heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* Interrupt synchronization is in progress; do no work this
         * tick but keep the timer alive.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block updated but possibly unserviced:
                         * force an interrupt via GRC local control.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Otherwise ask the coalescing engine to DMA a
                         * fresh status block immediately.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        /* Write DMA engine dropped out: hand off a full
                         * chip reset to process context and bail; the
                         * RESTART_TIMER flag presumably tells the reset
                         * task to re-arm this timer — confirm against
                         * tp->reset_task.
                         */
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        /* Link changes reported through MAC_STATUS
                         * rather than a PHY interrupt.
                         */
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Carrier up but link-state change flagged, or
                         * carrier down while the SERDES sees sync or a
                         * signal: renegotiate.
                         */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Bounce the port mode bits to
                                         * reset the link state machine.
                                         */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                        u32 val;

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
                        /* Ring the RX CPU event doorbell so firmware
                         * notices the mailbox command.
                         */
                        val = tr32(GRC_RX_CPU_EVENT);
                        val |= (1 << 14);
                        tw32(GRC_RX_CPU_EVENT, val);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
6960
6961 static int tg3_request_irq(struct tg3 *tp)
6962 {
6963         irq_handler_t fn;
6964         unsigned long flags;
6965         struct net_device *dev = tp->dev;
6966
6967         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6968                 fn = tg3_msi;
6969                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6970                         fn = tg3_msi_1shot;
6971                 flags = IRQF_SAMPLE_RANDOM;
6972         } else {
6973                 fn = tg3_interrupt;
6974                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6975                         fn = tg3_interrupt_tagged;
6976                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6977         }
6978         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6979 }
6980
/* Verify that the chip can actually deliver an interrupt to the host.
 * Temporarily replaces the normal handler with tg3_test_isr, forces a
 * status block update via HOSTCC_MODE_NOW, and polls up to ~50ms for
 * evidence that the interrupt fired (non-zero interrupt mailbox, or
 * the PCI-interrupt-mask bit set in misc host control).  The normal
 * handler is reinstalled before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, -ENODEV if the
 * device is down, or a negative errno from request_irq.
 * NOTE(review): if the request_irq of tg3_test_isr fails, this returns
 * with no handler installed on tp->pdev->irq; callers appear to treat
 * any error other than -EIO as fatal and tear down — confirm.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        /* Swap in the dedicated test ISR. */
        free_irq(tp->pdev->irq, dev);

        err = request_irq(tp->pdev->irq, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
        if (err)
                return err;

        tp->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Force an immediate status block DMA, which should raise an
         * interrupt.
         */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               HOSTCC_MODE_NOW);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
                                        TG3_64BIT_REG_LOW);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                /* Either signature proves the ISR ran. */
                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                msleep(10);
        }

        tg3_disable_ints(tp);

        /* Restore the normal handler. */
        free_irq(tp->pdev->irq, dev);

        err = tg3_request_irq(tp);

        if (err)
                return err;

        if (intr_ok)
                return 0;

        return -EIO;
}
7034
/* Returns 0 if the MSI test succeeds, or if it fails and INTx mode is
 * successfully restored; otherwise a negative errno.
 *
 * SERR reporting is disabled around the test because a failing MSI
 * cycle may terminate with Master Abort.  If no interrupt is seen
 * (-EIO from tg3_test_interrupt), the driver falls back to INTx and
 * then halts and re-initializes the chip, since the aborted MSI cycle
 * may have left it in a bad state.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test unless MSI is actually in use. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the saved PCI command word regardless of outcome. */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        /* Re-request the line with the INTx handler selection. */
        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        /* On re-init failure, release the IRQ; the device cannot run. */
        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
7095
/* net_device open() hook: power the chip up to D0, allocate the DMA
 * descriptor rings, optionally enable MSI, request the interrupt,
 * program the hardware and start the driver timer.  Returns 0 or a
 * negative errno; every failure path releases whatever was acquired
 * up to that point.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        netif_carrier_off(tp->dev);

        tg3_full_lock(tp, 0);

        /* Bring the chip to full power before touching anything else. */
        err = tg3_set_power_state(tp, PCI_D0);
        if (err) {
                tg3_full_unlock(tp);
                return err;
        }

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        /* Hardware bug - MSI won't work if INTX disabled. */
                        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                                pci_intx(tp->pdev, 1);

                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                /* Undo the MSI enable and the ring allocation. */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        napi_enable(&tp->napi);

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* 1 Hz timer with tagged status, 10 Hz otherwise.
                 * NOTE(review): presumably the faster period compensates
                 * for the lack of tagged status -- confirm in tg3_timer().
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                napi_disable(&tp->napi);
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                /* Verify that MSI interrupts actually arrive;
                 * tg3_test_msi() falls back to INTx by itself, so a
                 * non-zero return here means even the fallback failed.
                 */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        napi_disable(&tp->napi);

                        return err;
                }

                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        /* One-shot MSI mode, if the chip supports it. */
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                u32 val = tr32(PCIE_TRANSACTION_CFG);

                                tw32(PCIE_TRANSACTION_CFG,
                                     val | PCIE_TRANS_CFG_1SHOT_MSI);
                        }
                }
        }

        tg3_full_lock(tp, 0);

        /* Everything is set up: start the timer, mark init complete
         * and enable interrupts before opening the TX queue.
         */
        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
7235
7236 #if 0
/* Debug-only helper (this body sits inside #if 0 and is compiled out):
 * dumps PCI state, the MODE/STATUS registers of every major chip block
 * (MAC, send/receive data paths, host coalescing, memory arbiter,
 * buffer manager, DMA engines, GRC), the TG3_BDINFO ring descriptors,
 * the SRAM send/receive/status control blocks, the software status and
 * statistics blocks, the mailbox producer indices, and the first few
 * NIC-side TX/RX descriptors -- all via raw printk().
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;
        int i;

        pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
        printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
               val16, val32);

        /* MAC block */
        printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
               tr32(MAC_MODE), tr32(MAC_STATUS));
        printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
               tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
        printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
               tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
        printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

        /* Send data initiator control block */
        printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
               tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
        printk("       SNDDATAI_STATSCTRL[%08x]\n",
               tr32(SNDDATAI_STATSCTRL));

        /* Send data completion control block */
        printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

        /* Send BD ring selector block */
        printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
               tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

        /* Send BD initiator control block */
        printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
               tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

        /* Send BD completion control block */
        printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

        /* Receive list placement control block */
        printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
               tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
        printk("       RCVLPC_STATSCTRL[%08x]\n",
               tr32(RCVLPC_STATSCTRL));

        /* Receive data and receive BD initiator control block */
        printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
               tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

        /* Receive data completion control block */
        printk("DEBUG: RCVDCC_MODE[%08x]\n",
               tr32(RCVDCC_MODE));

        /* Receive BD initiator control block */
        printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
               tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

        /* Receive BD completion control block */
        printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
               tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

        /* Receive list selector control block */
        printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
               tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

        /* Mbuf cluster free block */
        printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
               tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

        /* Host coalescing control block */
        printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
               tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
        printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATS_BLK_NIC_ADDR));
        printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

        /* Memory arbiter control block */
        printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
               tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

        /* Buffer manager control block */
        printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
               tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
        printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
        printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
               "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_DMA_DESC_POOL_ADDR),
               tr32(BUFMGR_DMA_DESC_POOL_SIZE));

        /* Read DMA control block */
        printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
               tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

        /* Write DMA control block */
        printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
               tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

        /* DMA completion block */
        printk("DEBUG: DMAC_MODE[%08x]\n",
               tr32(DMAC_MODE));

        /* GRC block */
        printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
               tr32(GRC_MODE), tr32(GRC_MISC_CFG));
        printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
               tr32(GRC_LOCAL_CTRL));

        /* TG3_BDINFOs */
        printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_JUMBO_BD + 0x0),
               tr32(RCVDBDI_JUMBO_BD + 0x4),
               tr32(RCVDBDI_JUMBO_BD + 0x8),
               tr32(RCVDBDI_JUMBO_BD + 0xc));
        printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_STD_BD + 0x0),
               tr32(RCVDBDI_STD_BD + 0x4),
               tr32(RCVDBDI_STD_BD + 0x8),
               tr32(RCVDBDI_STD_BD + 0xc));
        printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_MINI_BD + 0x0),
               tr32(RCVDBDI_MINI_BD + 0x4),
               tr32(RCVDBDI_MINI_BD + 0x8),
               tr32(RCVDBDI_MINI_BD + 0xc));

        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
        printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4, val32_5);

        /* SW status block */
        printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
               tp->hw_status->status,
               tp->hw_status->status_tag,
               tp->hw_status->rx_jumbo_consumer,
               tp->hw_status->rx_consumer,
               tp->hw_status->rx_mini_consumer,
               tp->hw_status->idx[0].rx_producer,
               tp->hw_status->idx[0].tx_consumer);

        /* SW statistics block */
        printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
               ((u32 *)tp->hw_stats)[0],
               ((u32 *)tp->hw_stats)[1],
               ((u32 *)tp->hw_stats)[2],
               ((u32 *)tp->hw_stats)[3]);

        /* Mailboxes */
        printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

        /* NIC side send descriptors. */
        /* NOTE(review): tp->regs is an ioremap'ed base; carrying the sum
         * in an unsigned long and handing it to readl() relies on the
         * arch accepting integer addresses -- confirm __iomem handling
         * if this block is ever re-enabled.
         */
        for (i = 0; i < 6; i++) {
                unsigned long txd;

                txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
                        + (i * sizeof(struct tg3_tx_buffer_desc));
                printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(txd + 0x0), readl(txd + 0x4),
                       readl(txd + 0x8), readl(txd + 0xc));
        }

        /* NIC side RX descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }

        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }
}
7462 #endif
7463
7464 static struct net_device_stats *tg3_get_stats(struct net_device *);
7465 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7466
/* net_device stop() hook: tear down in roughly the reverse order of
 * tg3_open().  Stops NAPI, the reset worker, the TX queue and the
 * timer, halts the chip, releases the irq (and MSI), snapshots the
 * statistics, frees the DMA rings and drops the chip into D3hot.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        napi_disable(&tp->napi);
        /* Make sure a queued reset_task is not running mid-teardown. */
        cancel_work_sync(&tp->reset_task);

        netif_stop_queue(dev);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
#if 0
        tg3_dump_state(tp);
#endif

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Save the final counter totals before the stats block goes
         * away; tg3_get_stats()/tg3_get_estats() fall back to these
         * snapshots via their !hw_stats guards.
         */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
7510
/* Read a 64-bit hardware statistics counter into an unsigned long.
 * A 32-bit build can only represent the low word, so the high word
 * is discarded there; a 64-bit build combines both halves.
 */
static inline unsigned long get_stat64(tg3_stat64_t *val)
{
#if (BITS_PER_LONG == 32)
        return val->low;
#else
        return ((u64)val->high << 32) | ((u64)val->low);
#endif
}
7522
/* Return the cumulative RX CRC error count.  On 5700/5701 copper
 * (non-SerDes) parts the count comes from the PHY: enable the PHY's
 * CRC counter via MII_TG3_TEST1 and read the tally from PHY register
 * 0x14, accumulating it into tp->phy_crc_errors (NOTE(review):
 * presumably a clear-on-read counter, hence the accumulation --
 * confirm against the PHY datasheet).  All other parts use the
 * rx_fcs_errors hardware MIB statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                /* PHY access is serialized by tp->lock. */
                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
7548
/* estats->member = the snapshot saved at the last close
 * (tp->estats_prev, see tg3_close()) plus the live hardware counter.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)

/* Build the ethtool statistics block.  Every field is the pre-close
 * snapshot plus the current hardware MIB counter; when the hardware
 * stats block is not mapped (device down) the snapshot is returned
 * unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        return estats;
}
7640
/* net_device get_stats hook: fold the hardware MIB counters into the
 * standard net_device_stats fields.  As in tg3_get_estats(), every
 * field is the pre-close snapshot (tp->net_stats_prev) plus the live
 * counter, and the snapshot alone is returned while the hardware
 * stats block is not mapped.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        struct net_device_stats *stats = &tp->net_stats;
        struct net_device_stats *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY instead of the MIB block
         * on some chips -- see calc_crc_errors().
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        return stats;
}
7700
/* Compute the standard Ethernet CRC-32 over buf[0..len-1]: reflected
 * bit order, polynomial 0xedb88320, initial value 0xffffffff, final
 * one's complement.  Used to index the multicast hash filter.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
        u32 crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];

                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 0x01) ? 0xedb88320 : 0);
        }

        return ~crc;
}
7725
7726 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7727 {
7728         /* accept or reject all multicast frames */
7729         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7730         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7731         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7732         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7733 }
7734
/* Program the MAC RX filtering (promiscuous flag, multicast hash,
 * VLAN tag stripping) from dev->flags and the device multicast list.
 * Caller must hold the full lock -- see tg3_set_rx_mode().
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
#if TG3_VLAN_TAG_USED
        if (!tp->vlgrp &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
        /* By definition, VLAN is disabled always in this
         * case.
         */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi (tp, 1);
        } else if (dev->mc_count < 1) {
                /* Reject all multicast. */
                tg3_set_multi (tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                unsigned int i;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* The low 7 bits of the inverted CRC of the MAC
                         * address select one of 128 hash-filter bits:
                         * bits 6:5 pick the register, bits 4:0 the bit
                         * within it.
                         */
                        crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch the RX mode register when the value actually
         * changed; tw32_f flushes the posted write before the delay.
         */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
7798
/* net_device set_rx_mode hook: apply the current RX filtering flags
 * under the full lock.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_rx_mode(dev);
                tg3_full_unlock(tp);
        }
}
7810
7811 #define TG3_REGDUMP_LEN         (32 * 1024)
7812
/* ethtool get_regs_len hook: the register dump buffer is always the
 * fixed 32 KiB window (TG3_REGDUMP_LEN).
 */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REGDUMP_LEN;
}
7817
/* ethtool get_regs hook: copy selected chip registers into the 32 KiB
 * dump buffer.  Each value is stored at its own register offset within
 * the buffer (the GET_REG32_* macros reposition 'p' to orig_p + offset
 * before reading); unread gaps stay zero from the initial memset.
 * Skipped entirely while the PHY is powered down.
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p;
        struct tg3 *tp = netdev_priv(dev);
        u8 *orig_p = _p;
        int i;

        regs->version = 0;

        memset(p, 0, TG3_REGDUMP_LEN);

        if (tp->link_config.phy_is_low_power)
                return;

        tg3_full_lock(tp, 0);

/* Read one register into the buffer and advance the cursor. */
#define __GET_REG32(reg)        (*(p)++ = tr32(reg))
/* Dump 'len' bytes of registers starting at 'base', placed at offset
 * 'base' within the buffer.
 */
#define GET_REG32_LOOP(base,len)                \
do {    p = (u32 *)(orig_p + (base));           \
        for (i = 0; i < len; i += 4)            \
                __GET_REG32((base) + i);        \
} while (0)
/* Dump a single register at its own offset within the buffer. */
#define GET_REG32_1(reg)                        \
do {    p = (u32 *)(orig_p + (reg));            \
        __GET_REG32((reg));                     \
} while (0)

        GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
        GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
        GET_REG32_LOOP(MAC_MODE, 0x4f0);
        GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
        GET_REG32_1(SNDDATAC_MODE);
        GET_REG32_LOOP(SNDBDS_MODE, 0x80);
        GET_REG32_LOOP(SNDBDI_MODE, 0x48);
        GET_REG32_1(SNDBDC_MODE);
        GET_REG32_LOOP(RCVLPC_MODE, 0x20);
        GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
        GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
        GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
        GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
        GET_REG32_1(RCVDCC_MODE);
        GET_REG32_LOOP(RCVBDI_MODE, 0x20);
        GET_REG32_LOOP(RCVCC_MODE, 0x14);
        GET_REG32_LOOP(RCVLSC_MODE, 0x08);
        GET_REG32_1(MBFREE_MODE);
        GET_REG32_LOOP(HOSTCC_MODE, 0x100);
        GET_REG32_LOOP(MEMARB_MODE, 0x10);
        GET_REG32_LOOP(BUFMGR_MODE, 0x58);
        GET_REG32_LOOP(RDMAC_MODE, 0x08);
        GET_REG32_LOOP(WDMAC_MODE, 0x08);
        GET_REG32_1(RX_CPU_MODE);
        GET_REG32_1(RX_CPU_STATE);
        GET_REG32_1(RX_CPU_PGMCTR);
        GET_REG32_1(RX_CPU_HWBKPT);
        GET_REG32_1(TX_CPU_MODE);
        GET_REG32_1(TX_CPU_STATE);
        GET_REG32_1(TX_CPU_PGMCTR);
        GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
        GET_REG32_LOOP(FTQ_RESET, 0x120);
        GET_REG32_LOOP(MSGINT_MODE, 0x0c);
        GET_REG32_1(DMAC_MODE);
        GET_REG32_LOOP(GRC_MODE, 0x4c);
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

        tg3_full_unlock(tp);
}
7890
7891 static int tg3_get_eeprom_len(struct net_device *dev)
7892 {
7893         struct tg3 *tp = netdev_priv(dev);
7894
7895         return tp->nvram_size;
7896 }
7897
7898 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7899 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7900
/* ethtool ->get_eeprom handler: copy eeprom->len bytes of NVRAM,
 * starting at eeprom->offset, into data.  NVRAM is readable only in
 * 4-byte words, so unaligned head/tail bytes are extracted from whole
 * words.  eeprom->len is updated to the number of bytes actually
 * copied, even on a partial failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is not accessible while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* NOTE(review): val is a u32 holding a little-endian value
		 * after this conversion (sparse would flag it); the byte at
		 * b_offset then lines up with the requested offset. */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report how many bytes made it before the failure. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7962
7963 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7964
/* ethtool ->set_eeprom handler: write eeprom->len bytes from data to
 * NVRAM at eeprom->offset.  NVRAM writes are word-based, so when the
 * request is unaligned at either end, the bordering words are read back
 * first and merged with the caller's bytes in a bounce buffer before
 * the block write.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		/* Fetch the word containing the leading bytes so the
		 * bytes before eeprom->offset are preserved. */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		/* Fetch the word the tail bytes fall into so the bytes
		 * beyond the caller's data are preserved. */
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* Order matters: the caller's data is copied last so it
		 * overwrites the overlapping bytes of 'start'/'end'. */
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8023
8024 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8025 {
8026         struct tg3 *tp = netdev_priv(dev);
8027
8028         cmd->supported = (SUPPORTED_Autoneg);
8029
8030         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8031                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8032                                    SUPPORTED_1000baseT_Full);
8033
8034         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8035                 cmd->supported |= (SUPPORTED_100baseT_Half |
8036                                   SUPPORTED_100baseT_Full |
8037                                   SUPPORTED_10baseT_Half |
8038                                   SUPPORTED_10baseT_Full |
8039                                   SUPPORTED_MII);
8040                 cmd->port = PORT_TP;
8041         } else {
8042                 cmd->supported |= SUPPORTED_FIBRE;
8043                 cmd->port = PORT_FIBRE;
8044         }
8045
8046         cmd->advertising = tp->link_config.advertising;
8047         if (netif_running(dev)) {
8048                 cmd->speed = tp->link_config.active_speed;
8049                 cmd->duplex = tp->link_config.active_duplex;
8050         }
8051         cmd->phy_address = PHY_ADDR;
8052         cmd->transceiver = 0;
8053         cmd->autoneg = tp->link_config.autoneg;
8054         cmd->maxtxpkt = 0;
8055         cmd->maxrxpkt = 0;
8056         return 0;
8057 }
8058
8059 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8060 {
8061         struct tg3 *tp = netdev_priv(dev);
8062
8063         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8064                 /* These are the only valid advertisement bits allowed.  */
8065                 if (cmd->autoneg == AUTONEG_ENABLE &&
8066                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8067                                           ADVERTISED_1000baseT_Full |
8068                                           ADVERTISED_Autoneg |
8069                                           ADVERTISED_FIBRE)))
8070                         return -EINVAL;
8071                 /* Fiber can only do SPEED_1000.  */
8072                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8073                          (cmd->speed != SPEED_1000))
8074                         return -EINVAL;
8075         /* Copper cannot force SPEED_1000.  */
8076         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8077                    (cmd->speed == SPEED_1000))
8078                 return -EINVAL;
8079         else if ((cmd->speed == SPEED_1000) &&
8080                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8081                 return -EINVAL;
8082
8083         tg3_full_lock(tp, 0);
8084
8085         tp->link_config.autoneg = cmd->autoneg;
8086         if (cmd->autoneg == AUTONEG_ENABLE) {
8087                 tp->link_config.advertising = cmd->advertising;
8088                 tp->link_config.speed = SPEED_INVALID;
8089                 tp->link_config.duplex = DUPLEX_INVALID;
8090         } else {
8091                 tp->link_config.advertising = 0;
8092                 tp->link_config.speed = cmd->speed;
8093                 tp->link_config.duplex = cmd->duplex;
8094         }
8095
8096         tp->link_config.orig_speed = tp->link_config.speed;
8097         tp->link_config.orig_duplex = tp->link_config.duplex;
8098         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8099
8100         if (netif_running(dev))
8101                 tg3_setup_phy(tp, 1);
8102
8103         tg3_full_unlock(tp);
8104
8105         return 0;
8106 }
8107
8108 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8109 {
8110         struct tg3 *tp = netdev_priv(dev);
8111
8112         strcpy(info->driver, DRV_MODULE_NAME);
8113         strcpy(info->version, DRV_MODULE_VERSION);
8114         strcpy(info->fw_version, tp->fw_ver);
8115         strcpy(info->bus_info, pci_name(tp->pdev));
8116 }
8117
8118 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8119 {
8120         struct tg3 *tp = netdev_priv(dev);
8121
8122         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8123                 wol->supported = WAKE_MAGIC;
8124         else
8125                 wol->supported = 0;
8126         wol->wolopts = 0;
8127         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8128                 wol->wolopts = WAKE_MAGIC;
8129         memset(&wol->sopass, 0, sizeof(wol->sopass));
8130 }
8131
8132 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8133 {
8134         struct tg3 *tp = netdev_priv(dev);
8135
8136         if (wol->wolopts & ~WAKE_MAGIC)
8137                 return -EINVAL;
8138         if ((wol->wolopts & WAKE_MAGIC) &&
8139             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8140                 return -EINVAL;
8141
8142         spin_lock_bh(&tp->lock);
8143         if (wol->wolopts & WAKE_MAGIC)
8144                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8145         else
8146                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8147         spin_unlock_bh(&tp->lock);
8148
8149         return 0;
8150 }
8151
8152 static u32 tg3_get_msglevel(struct net_device *dev)
8153 {
8154         struct tg3 *tp = netdev_priv(dev);
8155         return tp->msg_enable;
8156 }
8157
8158 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8159 {
8160         struct tg3 *tp = netdev_priv(dev);
8161         tp->msg_enable = value;
8162 }
8163
8164 static int tg3_set_tso(struct net_device *dev, u32 value)
8165 {
8166         struct tg3 *tp = netdev_priv(dev);
8167
8168         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8169                 if (value)
8170                         return -EINVAL;
8171                 return 0;
8172         }
8173         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8174             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8175                 if (value)
8176                         dev->features |= NETIF_F_TSO6;
8177                 else
8178                         dev->features &= ~NETIF_F_TSO6;
8179         }
8180         return ethtool_op_set_tso(dev, value);
8181 }
8182
/* ethtool ->nway_reset handler: restart link autonegotiation.
 * Fails for SerDes PHYs and when autoneg is not currently enabled in
 * BMCR (unless the driver is in parallel-detect mode).
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice and the first result is
	 * discarded -- presumably a dummy read; confirm with the PHY
	 * errata before removing. */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Restart autoneg; ANENABLE is forced on to cover the
		 * parallel-detect case where it was clear. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8209
8210 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8211 {
8212         struct tg3 *tp = netdev_priv(dev);
8213
8214         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8215         ering->rx_mini_max_pending = 0;
8216         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8217                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8218         else
8219                 ering->rx_jumbo_max_pending = 0;
8220
8221         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8222
8223         ering->rx_pending = tp->rx_pending;
8224         ering->rx_mini_pending = 0;
8225         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8226                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8227         else
8228                 ering->rx_jumbo_pending = 0;
8229
8230         ering->tx_pending = tp->tx_pending;
8231 }
8232
/* ethtool ->set_ringparam handler: validate and apply new RX/TX ring
 * sizes.  If the interface is running, the chip is halted and fully
 * reinitialized so the new sizes take effect.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Reject out-of-range rings.  The TX ring must exceed
	 * MAX_SKB_FRAGS (3x that on chips flagged with the TSO bug). */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Full reset so the hardware picks up the new sizes; the
		 * interface stays stopped if the restart fails. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8272
8273 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8274 {
8275         struct tg3 *tp = netdev_priv(dev);
8276
8277         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8278         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8279         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8280 }
8281
8282 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8283 {
8284         struct tg3 *tp = netdev_priv(dev);
8285         int irq_sync = 0, err = 0;
8286
8287         if (netif_running(dev)) {
8288                 tg3_netif_stop(tp);
8289                 irq_sync = 1;
8290         }
8291
8292         tg3_full_lock(tp, irq_sync);
8293
8294         if (epause->autoneg)
8295                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8296         else
8297                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8298         if (epause->rx_pause)
8299                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8300         else
8301                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8302         if (epause->tx_pause)
8303                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8304         else
8305                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8306
8307         if (netif_running(dev)) {
8308                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8309                 err = tg3_restart_hw(tp, 1);
8310                 if (!err)
8311                         tg3_netif_start(tp);
8312         }
8313
8314         tg3_full_unlock(tp);
8315
8316         return err;
8317 }
8318
8319 static u32 tg3_get_rx_csum(struct net_device *dev)
8320 {
8321         struct tg3 *tp = netdev_priv(dev);
8322         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8323 }
8324
8325 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8326 {
8327         struct tg3 *tp = netdev_priv(dev);
8328
8329         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8330                 if (data != 0)
8331                         return -EINVAL;
8332                 return 0;
8333         }
8334
8335         spin_lock_bh(&tp->lock);
8336         if (data)
8337                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8338         else
8339                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8340         spin_unlock_bh(&tp->lock);
8341
8342         return 0;
8343 }
8344
8345 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8346 {
8347         struct tg3 *tp = netdev_priv(dev);
8348
8349         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8350                 if (data != 0)
8351                         return -EINVAL;
8352                 return 0;
8353         }
8354
8355         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8356             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8357                 ethtool_op_set_tx_ipv6_csum(dev, data);
8358         else
8359                 ethtool_op_set_tx_csum(dev, data);
8360
8361         return 0;
8362 }
8363
8364 static int tg3_get_sset_count (struct net_device *dev, int sset)
8365 {
8366         switch (sset) {
8367         case ETH_SS_TEST:
8368                 return TG3_NUM_TEST;
8369         case ETH_SS_STATS:
8370                 return TG3_NUM_STATS;
8371         default:
8372                 return -EOPNOTSUPP;
8373         }
8374 }
8375
8376 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8377 {
8378         switch (stringset) {
8379         case ETH_SS_STATS:
8380                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8381                 break;
8382         case ETH_SS_TEST:
8383                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8384                 break;
8385         default:
8386                 WARN_ON(1);     /* we need a WARN() */
8387                 break;
8388         }
8389 }
8390
8391 static int tg3_phys_id(struct net_device *dev, u32 data)
8392 {
8393         struct tg3 *tp = netdev_priv(dev);
8394         int i;
8395
8396         if (!netif_running(tp->dev))
8397                 return -EAGAIN;
8398
8399         if (data == 0)
8400                 data = 2;
8401
8402         for (i = 0; i < (data * 2); i++) {
8403                 if ((i % 2) == 0)
8404                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8405                                            LED_CTRL_1000MBPS_ON |
8406                                            LED_CTRL_100MBPS_ON |
8407                                            LED_CTRL_10MBPS_ON |
8408                                            LED_CTRL_TRAFFIC_OVERRIDE |
8409                                            LED_CTRL_TRAFFIC_BLINK |
8410                                            LED_CTRL_TRAFFIC_LED);
8411
8412                 else
8413                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8414                                            LED_CTRL_TRAFFIC_OVERRIDE);
8415
8416                 if (msleep_interruptible(500))
8417                         break;
8418         }
8419         tw32(MAC_LED_CTRL, tp->led_ctrl);
8420         return 0;
8421 }
8422
8423 static void tg3_get_ethtool_stats (struct net_device *dev,
8424                                    struct ethtool_stats *estats, u64 *tmp_stats)
8425 {
8426         struct tg3 *tp = netdev_priv(dev);
8427         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8428 }
8429
8430 #define NVRAM_TEST_SIZE 0x100
8431 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8432 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8433 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8434
/* Self-test: verify the NVRAM contents.  Legacy images are CRC-checked
 * (bootstrap block and manufacturing block); selfboot format-1 images
 * use a simple byte checksum; selfboot "HW" images use per-byte odd
 * parity.  Returns 0 on success, negative errno on failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* The magic word at offset 0 selects the image format and hence
	 * how much NVRAM must be validated. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;	/* unrecognized selfboot variant: skip */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image into the buffer, one word at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* Valid when all bytes sum to zero (mod 256). */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits
				 * (taken MSB first). */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Bytes 16-17 carry the remaining parity
				 * bits: 6 from byte 16, then 8 from 17. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			/* Odd parity: each data byte plus its parity bit
			 * must have odd total weight. */
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8547
8548 #define TG3_SERDES_TIMEOUT_SEC  2
8549 #define TG3_COPPER_TIMEOUT_SEC  6
8550
8551 static int tg3_test_link(struct tg3 *tp)
8552 {
8553         int i, max;
8554
8555         if (!netif_running(tp->dev))
8556                 return -ENODEV;
8557
8558         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8559                 max = TG3_SERDES_TIMEOUT_SEC;
8560         else
8561                 max = TG3_COPPER_TIMEOUT_SEC;
8562
8563         for (i = 0; i < max; i++) {
8564                 if (netif_carrier_ok(tp->dev))
8565                         return 0;
8566
8567                 if (msleep_interruptible(1000))
8568                         break;
8569         }
8570
8571         return -EIO;
8572 }
8573
8574 /* Only test the commonly used registers */
8575 static int tg3_test_registers(struct tg3 *tp)
8576 {
8577         int i, is_5705, is_5750;
8578         u32 offset, read_mask, write_mask, val, save_val, read_val;
8579         static struct {
8580                 u16 offset;
8581                 u16 flags;
8582 #define TG3_FL_5705     0x1
8583 #define TG3_FL_NOT_5705 0x2
8584 #define TG3_FL_NOT_5788 0x4
8585 #define TG3_FL_NOT_5750 0x8
8586                 u32 read_mask;
8587                 u32 write_mask;
8588         } reg_tbl[] = {
8589                 /* MAC Control Registers */
8590                 { MAC_MODE, TG3_FL_NOT_5705,
8591                         0x00000000, 0x00ef6f8c },
8592                 { MAC_MODE, TG3_FL_5705,
8593                         0x00000000, 0x01ef6b8c },
8594                 { MAC_STATUS, TG3_FL_NOT_5705,
8595                         0x03800107, 0x00000000 },
8596                 { MAC_STATUS, TG3_FL_5705,
8597                         0x03800100, 0x00000000 },
8598                 { MAC_ADDR_0_HIGH, 0x0000,
8599                         0x00000000, 0x0000ffff },
8600                 { MAC_ADDR_0_LOW, 0x0000,
8601                         0x00000000, 0xffffffff },
8602                 { MAC_RX_MTU_SIZE, 0x0000,
8603                         0x00000000, 0x0000ffff },
8604                 { MAC_TX_MODE, 0x0000,
8605                         0x00000000, 0x00000070 },
8606                 { MAC_TX_LENGTHS, 0x0000,
8607                         0x00000000, 0x00003fff },
8608                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8609                         0x00000000, 0x000007fc },
8610                 { MAC_RX_MODE, TG3_FL_5705,
8611                         0x00000000, 0x000007dc },
8612                 { MAC_HASH_REG_0, 0x0000,
8613                         0x00000000, 0xffffffff },
8614                 { MAC_HASH_REG_1, 0x0000,
8615                         0x00000000, 0xffffffff },
8616                 { MAC_HASH_REG_2, 0x0000,
8617                         0x00000000, 0xffffffff },
8618                 { MAC_HASH_REG_3, 0x0000,
8619                         0x00000000, 0xffffffff },
8620
8621                 /* Receive Data and Receive BD Initiator Control Registers. */
8622                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8623                         0x00000000, 0xffffffff },
8624                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8625                         0x00000000, 0xffffffff },
8626                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8627                         0x00000000, 0x00000003 },
8628                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8629                         0x00000000, 0xffffffff },
8630                 { RCVDBDI_STD_BD+0, 0x0000,
8631                         0x00000000, 0xffffffff },
8632                 { RCVDBDI_STD_BD+4, 0x0000,
8633                         0x00000000, 0xffffffff },
8634                 { RCVDBDI_STD_BD+8, 0x0000,
8635                         0x00000000, 0xffff0002 },
8636                 { RCVDBDI_STD_BD+0xc, 0x0000,
8637                         0x00000000, 0xffffffff },
8638
8639                 /* Receive BD Initiator Control Registers. */
8640                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8641                         0x00000000, 0xffffffff },
8642                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8643                         0x00000000, 0x000003ff },
8644                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8645                         0x00000000, 0xffffffff },
8646
8647                 /* Host Coalescing Control Registers. */
8648                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8649                         0x00000000, 0x00000004 },
8650                 { HOSTCC_MODE, TG3_FL_5705,
8651                         0x00000000, 0x000000f6 },
8652                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8653                         0x00000000, 0xffffffff },
8654                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8655                         0x00000000, 0x000003ff },
8656                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8657                         0x00000000, 0xffffffff },
8658                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8659                         0x00000000, 0x000003ff },
8660                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8661                         0x00000000, 0xffffffff },
8662                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8663                         0x00000000, 0x000000ff },
8664                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8665                         0x00000000, 0xffffffff },
8666                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8667                         0x00000000, 0x000000ff },
8668                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8669                         0x00000000, 0xffffffff },
8670                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8671                         0x00000000, 0xffffffff },
8672                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8673                         0x00000000, 0xffffffff },
8674                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8675                         0x00000000, 0x000000ff },
8676                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8677                         0x00000000, 0xffffffff },
8678                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8679                         0x00000000, 0x000000ff },
8680                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8681                         0x00000000, 0xffffffff },
8682                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8683                         0x00000000, 0xffffffff },
8684                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8685                         0x00000000, 0xffffffff },
8686                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8687                         0x00000000, 0xffffffff },
8688                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8689                         0x00000000, 0xffffffff },
8690                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8691                         0xffffffff, 0x00000000 },
8692                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8693                         0xffffffff, 0x00000000 },
8694
8695                 /* Buffer Manager Control Registers. */
8696                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
8697                         0x00000000, 0x007fff80 },
8698                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
8699                         0x00000000, 0x007fffff },
8700                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8701                         0x00000000, 0x0000003f },
8702                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8703                         0x00000000, 0x000001ff },
8704                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8705                         0x00000000, 0x000001ff },
8706                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8707                         0xffffffff, 0x00000000 },
8708                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8709                         0xffffffff, 0x00000000 },
8710
8711                 /* Mailbox Registers */
8712                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8713                         0x00000000, 0x000001ff },
8714                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8715                         0x00000000, 0x000001ff },
8716                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8717                         0x00000000, 0x000007ff },
8718                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8719                         0x00000000, 0x000001ff },
8720
8721                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8722         };
8723
8724         is_5705 = is_5750 = 0;
8725         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8726                 is_5705 = 1;
8727                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8728                         is_5750 = 1;
8729         }
8730
8731         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8732                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8733                         continue;
8734
8735                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8736                         continue;
8737
8738                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8739                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8740                         continue;
8741
8742                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8743                         continue;
8744
8745                 offset = (u32) reg_tbl[i].offset;
8746                 read_mask = reg_tbl[i].read_mask;
8747                 write_mask = reg_tbl[i].write_mask;
8748
8749                 /* Save the original register content */
8750                 save_val = tr32(offset);
8751
8752                 /* Determine the read-only value. */
8753                 read_val = save_val & read_mask;
8754
8755                 /* Write zero to the register, then make sure the read-only bits
8756                  * are not changed and the read/write bits are all zeros.
8757                  */
8758                 tw32(offset, 0);
8759
8760                 val = tr32(offset);
8761
8762                 /* Test the read-only and read/write bits. */
8763                 if (((val & read_mask) != read_val) || (val & write_mask))
8764                         goto out;
8765
8766                 /* Write ones to all the bits defined by RdMask and WrMask, then
8767                  * make sure the read-only bits are not changed and the
8768                  * read/write bits are all ones.
8769                  */
8770                 tw32(offset, read_mask | write_mask);
8771
8772                 val = tr32(offset);
8773
8774                 /* Test the read-only bits. */
8775                 if ((val & read_mask) != read_val)
8776                         goto out;
8777
8778                 /* Test the read/write bits. */
8779                 if ((val & write_mask) != write_mask)
8780                         goto out;
8781
8782                 tw32(offset, save_val);
8783         }
8784
8785         return 0;
8786
8787 out:
8788         if (netif_msg_hw(tp))
8789                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
8790                        offset);
8791         tw32(offset, save_val);
8792         return -EIO;
8793 }
8794
8795 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8796 {
8797         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8798         int i;
8799         u32 j;
8800
8801         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8802                 for (j = 0; j < len; j += 4) {
8803                         u32 val;
8804
8805                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8806                         tg3_read_mem(tp, offset + j, &val);
8807                         if (val != test_pattern[i])
8808                                 return -EIO;
8809                 }
8810         }
8811         return 0;
8812 }
8813
8814 static int tg3_test_memory(struct tg3 *tp)
8815 {
8816         static struct mem_entry {
8817                 u32 offset;
8818                 u32 len;
8819         } mem_tbl_570x[] = {
8820                 { 0x00000000, 0x00b50},
8821                 { 0x00002000, 0x1c000},
8822                 { 0xffffffff, 0x00000}
8823         }, mem_tbl_5705[] = {
8824                 { 0x00000100, 0x0000c},
8825                 { 0x00000200, 0x00008},
8826                 { 0x00004000, 0x00800},
8827                 { 0x00006000, 0x01000},
8828                 { 0x00008000, 0x02000},
8829                 { 0x00010000, 0x0e000},
8830                 { 0xffffffff, 0x00000}
8831         }, mem_tbl_5755[] = {
8832                 { 0x00000200, 0x00008},
8833                 { 0x00004000, 0x00800},
8834                 { 0x00006000, 0x00800},
8835                 { 0x00008000, 0x02000},
8836                 { 0x00010000, 0x0c000},
8837                 { 0xffffffff, 0x00000}
8838         }, mem_tbl_5906[] = {
8839                 { 0x00000200, 0x00008},
8840                 { 0x00004000, 0x00400},
8841                 { 0x00006000, 0x00400},
8842                 { 0x00008000, 0x01000},
8843                 { 0x00010000, 0x01000},
8844                 { 0xffffffff, 0x00000}
8845         };
8846         struct mem_entry *mem_tbl;
8847         int err = 0;
8848         int i;
8849
8850         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8851                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8852                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8853                         mem_tbl = mem_tbl_5755;
8854                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8855                         mem_tbl = mem_tbl_5906;
8856                 else
8857                         mem_tbl = mem_tbl_5705;
8858         } else
8859                 mem_tbl = mem_tbl_570x;
8860
8861         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8862                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8863                     mem_tbl[i].len)) != 0)
8864                         break;
8865         }
8866
8867         return err;
8868 }
8869
8870 #define TG3_MAC_LOOPBACK        0
8871 #define TG3_PHY_LOOPBACK        1
8872
/* Transmit one frame through the requested loopback path (MAC-internal
 * or PHY loopback) and verify it returns intact on the standard receive
 * ring.
 *
 * Returns 0 when the looped-back frame is received with a matching
 * payload, -EINVAL for an unknown loopback_mode, -ENOMEM if the test
 * skb cannot be allocated, and -EIO for any other failure.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
        u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 desc_idx;
        struct sk_buff *skb, *rx_skb;
        u8 *tx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;

        if (loopback_mode == TG3_MAC_LOOPBACK) {
                /* HW errata - mac loopback fails in some cases on 5780.
                 * Normal traffic and PHY loopback are not affected by
                 * errata.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
                        return 0;

                mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
                           MAC_MODE_PORT_INT_LPBACK;
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        mac_mode |= MAC_MODE_LINK_POLARITY;
                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                tw32(MAC_MODE, mac_mode);
        } else if (loopback_mode == TG3_PHY_LOOPBACK) {
                u32 val;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        u32 phytest;

                        if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
                                u32 phy;

                                /* Clear bit 5 of shadow register 0x1b while
                                 * the shadow bank is exposed, then restore
                                 * the test register.
                                 */
                                tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                             phytest | MII_TG3_EPHY_SHADOW_EN);
                                if (!tg3_readphy(tp, 0x1b, &phy))
                                        tg3_writephy(tp, 0x1b, phy & ~0x20);
                                tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
                        }
                        /* 5906 loops back at 100 Mbps, others at 1000. */
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
                } else
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

                tg3_phy_toggle_automdix(tp, 0);

                tg3_writephy(tp, MII_BMCR, val);
                udelay(40);

                mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                } else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                /* reset to prevent losing 1st rx packet intermittently */
                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                        tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                        udelay(10);
                        tw32_f(MAC_RX_MODE, tp->rx_mode);
                }
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
                                mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                }
                tw32(MAC_MODE, mac_mode);
        }
        else
                return -EINVAL;

        err = -EIO;

        /* Build the test frame: dest MAC, 8 zero bytes, then an
         * incrementing byte pattern that is checked on receive.
         */
        tx_len = 1514;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + 4);

        for (i = 14; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
             HOSTCC_MODE_NOW);

        udelay(10);

        /* Snapshot the rx producer so we can tell when our frame lands. */
        rx_start_idx = tp->hw_status->idx[0].rx_producer;

        num_pkts = 0;

        tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

        tp->tx_prod++;
        num_pkts++;

        /* Ring the tx doorbell and read it back to flush the write. */
        tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
                     tp->tx_prod);
        tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

        udelay(10);

        /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 25; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       HOSTCC_MODE_NOW);

                udelay(10);

                tx_idx = tp->hw_status->idx[0].tx_consumer;
                rx_idx = tp->hw_status->idx[0].rx_producer;
                if ((tx_idx == tp->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* Fail if the frame was never consumed or never came back. */
        if (tx_idx != tp->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        desc = &tp->rx_rcb[rx_start_idx];
        desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
        opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
        if (opaque_key != RXD_OPAQUE_RING_STD)
                goto out;

        if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
            (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                goto out;

        /* Reported rx length includes the 4-byte FCS. */
        rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
        if (rx_len != tx_len)
                goto out;

        rx_skb = tp->rx_std_buffers[desc_idx].skb;

        map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
        pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

        /* Verify the payload pattern byte-for-byte. */
        for (i = 14; i < tx_len; i++) {
                if (*(rx_skb->data + i) != (u8) (i & 0xff))
                        goto out;
        }
        err = 0;

        /* tg3_free_rings will unmap and free the rx_skb */
out:
        return err;
}
9040
9041 #define TG3_MAC_LOOPBACK_FAILED         1
9042 #define TG3_PHY_LOOPBACK_FAILED         2
9043 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9044                                          TG3_PHY_LOOPBACK_FAILED)
9045
9046 static int tg3_test_loopback(struct tg3 *tp)
9047 {
9048         int err = 0;
9049
9050         if (!netif_running(tp->dev))
9051                 return TG3_LOOPBACK_FAILED;
9052
9053         err = tg3_reset_hw(tp, 1);
9054         if (err)
9055                 return TG3_LOOPBACK_FAILED;
9056
9057         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9058                 err |= TG3_MAC_LOOPBACK_FAILED;
9059         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9060                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9061                         err |= TG3_PHY_LOOPBACK_FAILED;
9062         }
9063
9064         return err;
9065 }
9066
/* ethtool self-test entry point.  Always runs the nvram and link tests;
 * when ETH_TEST_FL_OFFLINE is requested it additionally halts the chip
 * and runs the register, memory, loopback and interrupt tests, then
 * restores the device.  Per-test results go in data[0..5]; any failure
 * also sets ETH_TEST_FL_FAILED in etest->flags.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Wake the chip for the duration of the tests if it was asleep. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D0);

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        if (tg3_test_link(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, irq_sync = 0;

                /* Quiesce the interface before taking the device offline. */
                if (netif_running(dev)) {
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                /* Halt the chip and its internal CPUs for register/memory
                 * tests; hold the nvram lock across the CPU halts.
                 */
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }
                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }
                /* data[4] carries the loopback failure bitmask directly. */
                if ((data[4] = tg3_test_loopback(tp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Interrupt test needs the lock dropped so irqs can run. */
                tg3_full_unlock(tp);

                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[5] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Restore the hardware and restart the interface. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                        if (!tg3_restart_hw(tp, 1))
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);
        }
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D3hot);

}
9139
9140 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9141 {
9142         struct mii_ioctl_data *data = if_mii(ifr);
9143         struct tg3 *tp = netdev_priv(dev);
9144         int err;
9145
9146         switch(cmd) {
9147         case SIOCGMIIPHY:
9148                 data->phy_id = PHY_ADDR;
9149
9150                 /* fallthru */
9151         case SIOCGMIIREG: {
9152                 u32 mii_regval;
9153
9154                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9155                         break;                  /* We have no PHY */
9156
9157                 if (tp->link_config.phy_is_low_power)
9158                         return -EAGAIN;
9159
9160                 spin_lock_bh(&tp->lock);
9161                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9162                 spin_unlock_bh(&tp->lock);
9163
9164                 data->val_out = mii_regval;
9165
9166                 return err;
9167         }
9168
9169         case SIOCSMIIREG:
9170                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9171                         break;                  /* We have no PHY */
9172
9173                 if (!capable(CAP_NET_ADMIN))
9174                         return -EPERM;
9175
9176                 if (tp->link_config.phy_is_low_power)
9177                         return -EAGAIN;
9178
9179                 spin_lock_bh(&tp->lock);
9180                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9181                 spin_unlock_bh(&tp->lock);
9182
9183                 return err;
9184
9185         default:
9186                 /* do nothing */
9187                 break;
9188         }
9189         return -EOPNOTSUPP;
9190 }
9191
9192 #if TG3_VLAN_TAG_USED
/* VLAN hook: record the new vlan_group and reprogram the chip's RX mode
 * to match.  The interface is stopped while the update is made and
 * restarted afterwards, all under the full lock.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev))
                tg3_netif_stop(tp);

        tg3_full_lock(tp, 0);

        tp->vlgrp = grp;

        /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
        __tg3_set_rx_mode(dev);

        if (netif_running(dev))
                tg3_netif_start(tp);

        tg3_full_unlock(tp);
}
9212 #endif
9213
9214 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9215 {
9216         struct tg3 *tp = netdev_priv(dev);
9217
9218         memcpy(ec, &tp->coal, sizeof(*ec));
9219         return 0;
9220 }
9221
9222 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9223 {
9224         struct tg3 *tp = netdev_priv(dev);
9225         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9226         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9227
9228         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9229                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9230                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9231                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9232                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9233         }
9234
9235         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9236             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9237             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9238             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9239             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9240             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9241             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9242             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9243             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9244             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9245                 return -EINVAL;
9246
9247         /* No rx interrupts will be generated if both are zero */
9248         if ((ec->rx_coalesce_usecs == 0) &&
9249             (ec->rx_max_coalesced_frames == 0))
9250                 return -EINVAL;
9251
9252         /* No tx interrupts will be generated if both are zero */
9253         if ((ec->tx_coalesce_usecs == 0) &&
9254             (ec->tx_max_coalesced_frames == 0))
9255                 return -EINVAL;
9256
9257         /* Only copy relevant parameters, ignore all others. */
9258         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9259         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9260         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9261         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9262         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9263         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9264         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9265         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9266         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9267
9268         if (netif_running(dev)) {
9269                 tg3_full_lock(tp, 0);
9270                 __tg3_set_coalesce(tp, &tp->coal);
9271                 tg3_full_unlock(tp);
9272         }
9273         return 0;
9274 }
9275
/* ethtool operations table; the handlers are defined earlier in this file. */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .get_rx_csum            = tg3_get_rx_csum,
        .set_rx_csum            = tg3_set_rx_csum,
        .set_tx_csum            = tg3_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = tg3_set_tso,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .phys_id                = tg3_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};
9308
9309 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9310 {
9311         u32 cursize, val, magic;
9312
9313         tp->nvram_size = EEPROM_CHIP_SIZE;
9314
9315         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9316                 return;
9317
9318         if ((magic != TG3_EEPROM_MAGIC) &&
9319             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9320             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9321                 return;
9322
9323         /*
9324          * Size the chip by reading offsets at increasing powers of two.
9325          * When we encounter our validation signature, we know the addressing
9326          * has wrapped around, and thus have our chip size.
9327          */
9328         cursize = 0x10;
9329
9330         while (cursize < tp->nvram_size) {
9331                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9332                         return;
9333
9334                 if (val == magic)
9335                         break;
9336
9337                 cursize <<= 1;
9338         }
9339
9340         tp->nvram_size = cursize;
9341 }
9342
9343 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9344 {
9345         u32 val;
9346
9347         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9348                 return;
9349
9350         /* Selfboot format */
9351         if (val != TG3_EEPROM_MAGIC) {
9352                 tg3_get_eeprom_size(tp);
9353                 return;
9354         }
9355
9356         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9357                 if (val != 0) {
9358                         tp->nvram_size = (val >> 16) * 1024;
9359                         return;
9360                 }
9361         }
9362         tp->nvram_size = 0x80000;
9363 }
9364
9365 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9366 {
9367         u32 nvcfg1;
9368
9369         nvcfg1 = tr32(NVRAM_CFG1);
9370         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9371                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9372         }
9373         else {
9374                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9375                 tw32(NVRAM_CFG1, nvcfg1);
9376         }
9377
9378         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9379             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9380                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9381                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9382                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9383                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9384                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9385                                 break;
9386                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9387                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9388                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9389                                 break;
9390                         case FLASH_VENDOR_ATMEL_EEPROM:
9391                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9392                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9393                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9394                                 break;
9395                         case FLASH_VENDOR_ST:
9396                                 tp->nvram_jedecnum = JEDEC_ST;
9397                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9398                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9399                                 break;
9400                         case FLASH_VENDOR_SAIFUN:
9401                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9402                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9403                                 break;
9404                         case FLASH_VENDOR_SST_SMALL:
9405                         case FLASH_VENDOR_SST_LARGE:
9406                                 tp->nvram_jedecnum = JEDEC_SST;
9407                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9408                                 break;
9409                 }
9410         }
9411         else {
9412                 tp->nvram_jedecnum = JEDEC_ATMEL;
9413                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9414                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9415         }
9416 }
9417
9418 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9419 {
9420         u32 nvcfg1;
9421
9422         nvcfg1 = tr32(NVRAM_CFG1);
9423
9424         /* NVRAM protection for TPM */
9425         if (nvcfg1 & (1 << 27))
9426                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9427
9428         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9429                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9430                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9431                         tp->nvram_jedecnum = JEDEC_ATMEL;
9432                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9433                         break;
9434                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9435                         tp->nvram_jedecnum = JEDEC_ATMEL;
9436                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9437                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9438                         break;
9439                 case FLASH_5752VENDOR_ST_M45PE10:
9440                 case FLASH_5752VENDOR_ST_M45PE20:
9441                 case FLASH_5752VENDOR_ST_M45PE40:
9442                         tp->nvram_jedecnum = JEDEC_ST;
9443                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9444                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9445                         break;
9446         }
9447
9448         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9449                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9450                         case FLASH_5752PAGE_SIZE_256:
9451                                 tp->nvram_pagesize = 256;
9452                                 break;
9453                         case FLASH_5752PAGE_SIZE_512:
9454                                 tp->nvram_pagesize = 512;
9455                                 break;
9456                         case FLASH_5752PAGE_SIZE_1K:
9457                                 tp->nvram_pagesize = 1024;
9458                                 break;
9459                         case FLASH_5752PAGE_SIZE_2K:
9460                                 tp->nvram_pagesize = 2048;
9461                                 break;
9462                         case FLASH_5752PAGE_SIZE_4K:
9463                                 tp->nvram_pagesize = 4096;
9464                                 break;
9465                         case FLASH_5752PAGE_SIZE_264:
9466                                 tp->nvram_pagesize = 264;
9467                                 break;
9468                 }
9469         }
9470         else {
9471                 /* For eeprom, set pagesize to maximum eeprom size */
9472                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9473
9474                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9475                 tw32(NVRAM_CFG1, nvcfg1);
9476         }
9477 }
9478
9479 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9480 {
9481         u32 nvcfg1, protect = 0;
9482
9483         nvcfg1 = tr32(NVRAM_CFG1);
9484
9485         /* NVRAM protection for TPM */
9486         if (nvcfg1 & (1 << 27)) {
9487                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9488                 protect = 1;
9489         }
9490
9491         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9492         switch (nvcfg1) {
9493                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9494                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9495                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9496                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9497                         tp->nvram_jedecnum = JEDEC_ATMEL;
9498                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9499                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9500                         tp->nvram_pagesize = 264;
9501                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9502                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9503                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9504                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9505                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9506                         else
9507                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9508                         break;
9509                 case FLASH_5752VENDOR_ST_M45PE10:
9510                 case FLASH_5752VENDOR_ST_M45PE20:
9511                 case FLASH_5752VENDOR_ST_M45PE40:
9512                         tp->nvram_jedecnum = JEDEC_ST;
9513                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9514                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9515                         tp->nvram_pagesize = 256;
9516                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9517                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9518                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9519                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9520                         else
9521                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9522                         break;
9523         }
9524 }
9525
/* Identify the NVRAM device on 5787-class chips from the vendor bits of
 * NVRAM_CFG1 and record its JEDEC id, buffered/flash flags and page
 * size.  For the plain EEPROM variants the compat-bypass bit is cleared
 * and the config register written back.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			/* For eeprom, treat the whole chip as one page. */
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			/* Atmel buffered flash uses 264-byte pages. */
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9563
9564 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9565 {
9566         tp->nvram_jedecnum = JEDEC_ATMEL;
9567         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9568         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9569 }
9570
9571 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9572 static void __devinit tg3_nvram_init(struct tg3 *tp)
9573 {
9574         tw32_f(GRC_EEPROM_ADDR,
9575              (EEPROM_ADDR_FSM_RESET |
9576               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9577                EEPROM_ADDR_CLKPERD_SHIFT)));
9578
9579         msleep(1);
9580
9581         /* Enable seeprom accesses. */
9582         tw32_f(GRC_LOCAL_CTRL,
9583              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9584         udelay(100);
9585
9586         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9587             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9588                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9589
9590                 if (tg3_nvram_lock(tp)) {
9591                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9592                                "tg3_nvram_init failed.\n", tp->dev->name);
9593                         return;
9594                 }
9595                 tg3_enable_nvram_access(tp);
9596
9597                 tp->nvram_size = 0;
9598
9599                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9600                         tg3_get_5752_nvram_info(tp);
9601                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9602                         tg3_get_5755_nvram_info(tp);
9603                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9604                         tg3_get_5787_nvram_info(tp);
9605                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9606                         tg3_get_5906_nvram_info(tp);
9607                 else
9608                         tg3_get_nvram_info(tp);
9609
9610                 if (tp->nvram_size == 0)
9611                         tg3_get_nvram_size(tp);
9612
9613                 tg3_disable_nvram_access(tp);
9614                 tg3_nvram_unlock(tp);
9615
9616         } else {
9617                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9618
9619                 tg3_get_eeprom_size(tp);
9620         }
9621 }
9622
/* Read a 32-bit word from the serial eeprom via the GRC EEPROM state
 * machine (used when the chip has no NVRAM interface).  @offset must
 * be dword aligned and within EEPROM_ADDR_ADDR_MASK.  Returns 0 on
 * success, -EINVAL on a bad offset, or -EBUSY if the state machine
 * never signals completion (polled for up to ~1000 ms).
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the remaining register bits (e.g. clock period); clear
	 * only address, device id and direction before programming the
	 * new transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for the COMPLETE bit, sleeping 1 ms per iteration. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9656
9657 #define NVRAM_CMD_TIMEOUT 10000
9658
9659 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9660 {
9661         int i;
9662
9663         tw32(NVRAM_CMD, nvram_cmd);
9664         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9665                 udelay(10);
9666                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9667                         udelay(10);
9668                         break;
9669                 }
9670         }
9671         if (i == NVRAM_CMD_TIMEOUT) {
9672                 return -EBUSY;
9673         }
9674         return 0;
9675 }
9676
9677 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9678 {
9679         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9680             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9681             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9682             (tp->nvram_jedecnum == JEDEC_ATMEL))
9683
9684                 addr = ((addr / tp->nvram_pagesize) <<
9685                         ATMEL_AT45DB0X1B_PAGE_POS) +
9686                        (addr % tp->nvram_pagesize);
9687
9688         return addr;
9689 }
9690
9691 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9692 {
9693         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9694             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9695             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9696             (tp->nvram_jedecnum == JEDEC_ATMEL))
9697
9698                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9699                         tp->nvram_pagesize) +
9700                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9701
9702         return addr;
9703 }
9704
/* Read one 32-bit word of NVRAM at byte offset @offset into @val.
 * Falls back to the GRC eeprom state machine when no NVRAM interface
 * is present.  The register value is byte-swapped before being stored.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Buffered Atmel flash uses a page/offset address layout. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Arbitrate NVRAM access with the firmware before touching it. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9736
9737 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9738 {
9739         int err;
9740         u32 tmp;
9741
9742         err = tg3_nvram_read(tp, offset, &tmp);
9743         *val = swab32(tmp);
9744         return err;
9745 }
9746
/* Write @len bytes from @buf to the serial eeprom at @offset via the
 * GRC EEPROM state machine, one dword per transaction.  Each word is
 * polled for up to ~1000 ms; returns 0 on success or -EBUSY if a
 * transaction never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		val = tr32(GRC_EEPROM_ADDR);
		/* NOTE(review): COMPLETE is written back before starting —
		 * presumably write-one-to-clear of stale status; confirm
		 * against the GRC eeprom register documentation.
		 */
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, sleeping 1 ms per iteration. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9788
/* offset and length are dword aligned.
 *
 * Write @len bytes from @buf to unbuffered flash at @offset.
 * Unbuffered parts require a full page erase before programming, so
 * each affected page is read into a scratch buffer, merged with the
 * caller's data, erased and reprogrammed one dword at a time.  A
 * write-disable command is issued on exit regardless of the outcome.
 * Returns 0 on success, -ENOMEM, or the first command/read error.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing @offset. */
		phy_addr = offset & ~pagemask;

		/* Read the whole current page so the words we do not
		 * rewrite survive the erase below.
		 */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back, one dword at a time. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Leave the part write-disabled whether or not we succeeded. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
9884
/* offset and length are dword aligned.
 *
 * Write @len bytes from @buf to buffered flash or eeprom at @offset,
 * one dword per command.  FIRST/LAST flags are set at page boundaries
 * (and on every word for eeprom).  Returns 0 on success or the first
 * command error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate to the device's page/offset address layout. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* On chips other than 5752/5755/5787, ST parts need an
		 * explicit write-enable before the first word of a page.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9935
9936 /* offset and length are dword aligned */
9937 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9938 {
9939         int ret;
9940
9941         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9942                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9943                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9944                 udelay(40);
9945         }
9946
9947         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9948                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9949         }
9950         else {
9951                 u32 grc_mode;
9952
9953                 ret = tg3_nvram_lock(tp);
9954                 if (ret)
9955                         return ret;
9956
9957                 tg3_enable_nvram_access(tp);
9958                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9959                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9960                         tw32(NVRAM_WRITE1, 0x406);
9961
9962                 grc_mode = tr32(GRC_MODE);
9963                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9964
9965                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9966                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9967
9968                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9969                                 buf);
9970                 }
9971                 else {
9972                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9973                                 buf);
9974                 }
9975
9976                 grc_mode = tr32(GRC_MODE);
9977                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9978
9979                 tg3_disable_nvram_access(tp);
9980                 tg3_nvram_unlock(tp);
9981         }
9982
9983         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9984                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9985                 udelay(40);
9986         }
9987
9988         return ret;
9989 }
9990
/* One hardcoded board entry: a PCI subsystem vendor/device pair and the
 * PHY id fitted on that board.  A phy_id of 0 marks a board with no
 * copper PHY (treated as serdes by tg3_phy_probe()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
9995
/* Subsystem-ID to PHY-ID table, consulted by lookup_by_subsys() as a
 * last resort when the PHY cannot be identified from the hardware or
 * from the eeprom signature.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10033
10034 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10035 {
10036         int i;
10037
10038         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10039                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10040                      tp->pdev->subsystem_vendor) &&
10041                     (subsys_id_to_phy_id[i].subsys_devid ==
10042                      tp->pdev->subsystem_device))
10043                         return &subsys_id_to_phy_id[i];
10044         }
10045         return NULL;
10046 }
10047
/* Read the board configuration the bootcode leaves in NIC SRAM (and in
 * chip registers) and initialize phy_id, led_ctrl and the various
 * tg3_flags/tg3_flags2 bits accordingly.  Forces the device into D0
 * and re-enables the memory arbiter first so SRAM is accessible.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906 reports its configuration in registers, not SRAM. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		if (tr32(VCPU_CFGSHDW) & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		return;
	}

	/* A valid signature means the bootcode populated the SRAM
	 * configuration area; otherwise leave the defaults above.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY id from the two SRAM halves. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* Specific Arima boards report write protect but
			 * are exempted here.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}
	}
}
10225
10226 static int __devinit tg3_phy_probe(struct tg3 *tp)
10227 {
10228         u32 hw_phy_id_1, hw_phy_id_2;
10229         u32 hw_phy_id, hw_phy_id_masked;
10230         int err;
10231
10232         /* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
10234          */
10235         err = 0;
10236         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
10237                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10238         } else {
10239                 /* Now read the physical PHY_ID from the chip and verify
10240                  * that it is sane.  If it doesn't look good, we fall back
10241                  * to either the hard-coded table based PHY_ID and failing
10242                  * that the value found in the eeprom area.
10243                  */
10244                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10245                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10246
10247                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
10248                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10249                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
10250
10251                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10252         }
10253
10254         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10255                 tp->phy_id = hw_phy_id;
10256                 if (hw_phy_id_masked == PHY_ID_BCM8002)
10257                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10258                 else
10259                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10260         } else {
10261                 if (tp->phy_id != PHY_ID_INVALID) {
10262                         /* Do nothing, phy ID already set up in
10263                          * tg3_get_eeprom_hw_cfg().
10264                          */
10265                 } else {
10266                         struct subsys_tbl_ent *p;
10267
10268                         /* No eeprom signature?  Try the hardcoded
10269                          * subsys device table.
10270                          */
10271                         p = lookup_by_subsys(tp);
10272                         if (!p)
10273                                 return -ENODEV;
10274
10275                         tp->phy_id = p->phy_id;
10276                         if (!tp->phy_id ||
10277                             tp->phy_id == PHY_ID_BCM8002)
10278                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10279                 }
10280         }
10281
10282         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10283             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10284                 u32 bmsr, adv_reg, tg3_ctrl, mask;
10285
10286                 tg3_readphy(tp, MII_BMSR, &bmsr);
10287                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10288                     (bmsr & BMSR_LSTATUS))
10289                         goto skip_phy_reset;
10290
10291                 err = tg3_phy_reset(tp);
10292                 if (err)
10293                         return err;
10294
10295                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10296                            ADVERTISE_100HALF | ADVERTISE_100FULL |
10297                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10298                 tg3_ctrl = 0;
10299                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10300                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10301                                     MII_TG3_CTRL_ADV_1000_FULL);
10302                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10303                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10304                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10305                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
10306                 }
10307
10308                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10309                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10310                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
10311                 if (!tg3_copper_is_advertising_all(tp, mask)) {
10312                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10313
10314                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10315                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10316
10317                         tg3_writephy(tp, MII_BMCR,
10318                                      BMCR_ANENABLE | BMCR_ANRESTART);
10319                 }
10320                 tg3_phy_set_wirespeed(tp);
10321
10322                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10323                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10324                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10325         }
10326
10327 skip_phy_reset:
10328         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10329                 err = tg3_init_5401phy_dsp(tp);
10330                 if (err)
10331                         return err;
10332         }
10333
10334         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10335                 err = tg3_init_5401phy_dsp(tp);
10336         }
10337
10338         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10339                 tp->link_config.advertising =
10340                         (ADVERTISED_1000baseT_Half |
10341                          ADVERTISED_1000baseT_Full |
10342                          ADVERTISED_Autoneg |
10343                          ADVERTISED_FIBRE);
10344         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10345                 tp->link_config.advertising &=
10346                         ~(ADVERTISED_1000baseT_Half |
10347                           ADVERTISED_1000baseT_Full);
10348
10349         return err;
10350 }
10351
/* Read the board part number string into tp->board_part_number.
 *
 * The 256-byte VPD image is fetched either directly from NVRAM (when the
 * eeprom carries the TG3 magic signature) or via the PCI VPD capability,
 * then parsed for the read-only ("PN") part-number keyword.  On any
 * failure a fallback name is stored instead, so this never fails.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
        unsigned char vpd_data[256];
        unsigned int i;
        u32 magic;

        if (tg3_nvram_read_swab(tp, 0x0, &magic))
                goto out_not_found;

        if (magic == TG3_EEPROM_MAGIC) {
                /* VPD image lives at NVRAM offset 0x100; copy it out a
                 * dword at a time, little-endian byte order.
                 */
                for (i = 0; i < 256; i += 4) {
                        u32 tmp;

                        if (tg3_nvram_read(tp, 0x100 + i, &tmp))
                                goto out_not_found;

                        vpd_data[i + 0] = ((tmp >>  0) & 0xff);
                        vpd_data[i + 1] = ((tmp >>  8) & 0xff);
                        vpd_data[i + 2] = ((tmp >> 16) & 0xff);
                        vpd_data[i + 3] = ((tmp >> 24) & 0xff);
                }
        } else {
                int vpd_cap;

                /* No direct NVRAM image: read the VPD through the PCI
                 * VPD capability instead.
                 */
                vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
                for (i = 0; i < 256; i += 4) {
                        u32 tmp, j = 0;
                        u16 tmp16;

                        /* Write the address, then poll (up to ~100ms) for
                         * bit 15 of VPD_ADDR, which the device sets when
                         * the read data is ready.
                         */
                        pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
                                              i);
                        while (j++ < 100) {
                                pci_read_config_word(tp->pdev, vpd_cap +
                                                     PCI_VPD_ADDR, &tmp16);
                                if (tmp16 & 0x8000)
                                        break;
                                msleep(1);
                        }
                        if (!(tmp16 & 0x8000))
                                goto out_not_found;

                        pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
                                              &tmp);
                        tmp = cpu_to_le32(tmp);
                        memcpy(&vpd_data[i], &tmp, 4);
                }
        }

        /* Now parse and find the part number. */
        for (i = 0; i < 254; ) {
                unsigned char val = vpd_data[i];
                unsigned int block_end;

                /* 0x82 = identifier string, 0x91 = read/write resource:
                 * skip over them (header is 3 bytes + 16-bit length).
                 */
                if (val == 0x82 || val == 0x91) {
                        i = (i + 3 +
                             (vpd_data[i + 1] +
                              (vpd_data[i + 2] << 8)));
                        continue;
                }

                /* Anything other than the read-only resource (0x90)
                 * here means a malformed VPD image.
                 */
                if (val != 0x90)
                        goto out_not_found;

                block_end = (i + 3 +
                             (vpd_data[i + 1] +
                              (vpd_data[i + 2] << 8)));
                i += 3;

                if (block_end > 256)
                        goto out_not_found;

                /* Walk the keyword entries inside the read-only block,
                 * looking for "PN" (part number).
                 */
                while (i < (block_end - 2)) {
                        if (vpd_data[i + 0] == 'P' &&
                            vpd_data[i + 1] == 'N') {
                                int partno_len = vpd_data[i + 2];

                                i += 3;
                                /* Bound check: board_part_number storage and
                                 * the end of the 256-byte VPD buffer.
                                 */
                                if (partno_len > 24 || (partno_len + i) > 256)
                                        goto out_not_found;

                                memcpy(tp->board_part_number,
                                       &vpd_data[i], partno_len);

                                /* Success. */
                                return;
                        }
                        /* Each keyword entry: 2-byte key + length byte. */
                        i += 3 + vpd_data[i + 2];
                }

                /* Part number not found. */
                goto out_not_found;
        }

out_not_found:
        /* Fall back to a canned name; 5906 boards get a specific one. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                strcpy(tp->board_part_number, "BCM95906");
        else
                strcpy(tp->board_part_number, "none");
}
10451
/* Read the bootcode firmware version string out of NVRAM into tp->fw_ver.
 *
 * Silently returns, leaving tp->fw_ver untouched, if the NVRAM lacks the
 * TG3 magic signature or any read fails.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
        u32 val, offset, start;

        if (tg3_nvram_read_swab(tp, 0, &val))
                return;

        if (val != TG3_EEPROM_MAGIC)
                return;

        /* Offset 0xc holds the bootcode image pointer, offset 0x4 the
         * image load start address.
         */
        if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
            tg3_nvram_read_swab(tp, 0x4, &start))
                return;

        offset = tg3_nvram_logical_addr(tp, offset);
        if (tg3_nvram_read_swab(tp, offset, &val))
                return;

        /* Match the expected header pattern in the top opcode bits;
         * presumably identifies a valid bootcode image — TODO confirm
         * against firmware layout docs.
         */
        if ((val & 0xfc000000) == 0x0c000000) {
                u32 ver_offset, addr;
                int i;

                if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
                    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
                        return;

                if (val != 0)
                        return;

                /* The version-string offset is relative to the image load
                 * address; convert it back to an NVRAM address.
                 */
                addr = offset + ver_offset - start;
                /* Copy up to 16 bytes of version string, dword-at-a-time
                 * in little-endian byte order.
                 */
                for (i = 0; i < 16; i += 4) {
                        if (tg3_nvram_read(tp, addr + i, &val))
                                return;

                        val = cpu_to_le32(val);
                        memcpy(tp->fw_ver + i, &val, 4);
                }
        }
}
10491
10492 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10493
10494 static int __devinit tg3_get_invariants(struct tg3 *tp)
10495 {
10496         static struct pci_device_id write_reorder_chipsets[] = {
10497                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10498                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10499                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10500                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10501                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10502                              PCI_DEVICE_ID_VIA_8385_0) },
10503                 { },
10504         };
10505         u32 misc_ctrl_reg;
10506         u32 cacheline_sz_reg;
10507         u32 pci_state_reg, grc_misc_cfg;
10508         u32 val;
10509         u16 pci_cmd;
10510         int err, pcie_cap;
10511
10512         /* Force memory write invalidate off.  If we leave it on,
10513          * then on 5700_BX chips we have to enable a workaround.
10514          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10515          * to match the cacheline size.  The Broadcom driver have this
10516          * workaround but turns MWI off all the times so never uses
10517          * it.  This seems to suggest that the workaround is insufficient.
10518          */
10519         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10520         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10521         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10522
10523         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10524          * has the register indirect write enable bit set before
10525          * we try to access any of the MMIO registers.  It is also
10526          * critical that the PCI-X hw workaround situation is decided
10527          * before that as well.
10528          */
10529         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10530                               &misc_ctrl_reg);
10531
10532         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10533                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10535                 u32 prod_id_asic_rev;
10536
10537                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10538                                       &prod_id_asic_rev);
10539                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10540         }
10541
10542         /* Wrong chip ID in 5752 A0. This code can be removed later
10543          * as A0 is not in production.
10544          */
10545         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10546                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10547
10548         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10549          * we need to disable memory and use config. cycles
10550          * only to access all registers. The 5702/03 chips
10551          * can mistakenly decode the special cycles from the
10552          * ICH chipsets as memory write cycles, causing corruption
10553          * of register and memory space. Only certain ICH bridges
10554          * will drive special cycles with non-zero data during the
10555          * address phase which can fall within the 5703's address
10556          * range. This is not an ICH bug as the PCI spec allows
10557          * non-zero address during special cycles. However, only
10558          * these ICH bridges are known to drive non-zero addresses
10559          * during special cycles.
10560          *
10561          * Since special cycles do not cross PCI bridges, we only
10562          * enable this workaround if the 5703 is on the secondary
10563          * bus of these ICH bridges.
10564          */
10565         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10566             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10567                 static struct tg3_dev_id {
10568                         u32     vendor;
10569                         u32     device;
10570                         u32     rev;
10571                 } ich_chipsets[] = {
10572                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10573                           PCI_ANY_ID },
10574                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10575                           PCI_ANY_ID },
10576                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10577                           0xa },
10578                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10579                           PCI_ANY_ID },
10580                         { },
10581                 };
10582                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10583                 struct pci_dev *bridge = NULL;
10584
10585                 while (pci_id->vendor != 0) {
10586                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10587                                                 bridge);
10588                         if (!bridge) {
10589                                 pci_id++;
10590                                 continue;
10591                         }
10592                         if (pci_id->rev != PCI_ANY_ID) {
10593                                 if (bridge->revision > pci_id->rev)
10594                                         continue;
10595                         }
10596                         if (bridge->subordinate &&
10597                             (bridge->subordinate->number ==
10598                              tp->pdev->bus->number)) {
10599
10600                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10601                                 pci_dev_put(bridge);
10602                                 break;
10603                         }
10604                 }
10605         }
10606
10607         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10608          * DMA addresses > 40-bit. This bridge may have other additional
10609          * 57xx devices behind it in some 4-port NIC designs for example.
10610          * Any tg3 device found behind the bridge will also need the 40-bit
10611          * DMA workaround.
10612          */
10613         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10614             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10615                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10616                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10617                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10618         }
10619         else {
10620                 struct pci_dev *bridge = NULL;
10621
10622                 do {
10623                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10624                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10625                                                 bridge);
10626                         if (bridge && bridge->subordinate &&
10627                             (bridge->subordinate->number <=
10628                              tp->pdev->bus->number) &&
10629                             (bridge->subordinate->subordinate >=
10630                              tp->pdev->bus->number)) {
10631                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10632                                 pci_dev_put(bridge);
10633                                 break;
10634                         }
10635                 } while (bridge);
10636         }
10637
10638         /* Initialize misc host control in PCI block. */
10639         tp->misc_host_ctrl |= (misc_ctrl_reg &
10640                                MISC_HOST_CTRL_CHIPREV);
10641         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10642                                tp->misc_host_ctrl);
10643
10644         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10645                               &cacheline_sz_reg);
10646
10647         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10648         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10649         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10650         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10651
10652         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10653             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10654                 tp->pdev_peer = tg3_find_peer(tp);
10655
10656         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10657             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10658             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10659             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10660             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
10661             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10662                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10663
10664         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10665             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10666                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10667
10668         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10669                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
10670                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
10671                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
10672                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
10673                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
10674                      tp->pdev_peer == tp->pdev))
10675                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
10676
10677                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10678                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10679                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10680                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10681                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10682                 } else {
10683                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
10684                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10685                                 ASIC_REV_5750 &&
10686                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10687                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
10688                 }
10689         }
10690
10691         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10692             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10693             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10694             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10695             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
10696             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10697                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10698
10699         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
10700         if (pcie_cap != 0) {
10701                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10702                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10703                         u16 lnkctl;
10704
10705                         pci_read_config_word(tp->pdev,
10706                                              pcie_cap + PCI_EXP_LNKCTL,
10707                                              &lnkctl);
10708                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
10709                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
10710                 }
10711         }
10712
10713         /* If we have an AMD 762 or VIA K8T800 chipset, write
10714          * reordering to the mailbox registers done by the host
10715          * controller can cause major troubles.  We read back from
10716          * every mailbox register write to force the writes to be
10717          * posted to the chip in order.
10718          */
10719         if (pci_dev_present(write_reorder_chipsets) &&
10720             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10721                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10722
10723         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10724             tp->pci_lat_timer < 64) {
10725                 tp->pci_lat_timer = 64;
10726
10727                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10728                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10729                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10730                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10731
10732                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10733                                        cacheline_sz_reg);
10734         }
10735
10736         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
10737             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10738                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
10739                 if (!tp->pcix_cap) {
10740                         printk(KERN_ERR PFX "Cannot find PCI-X "
10741                                             "capability, aborting.\n");
10742                         return -EIO;
10743                 }
10744         }
10745
10746         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10747                               &pci_state_reg);
10748
10749         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10750                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10751
10752                 /* If this is a 5700 BX chipset, and we are in PCI-X
10753                  * mode, enable register write workaround.
10754                  *
10755                  * The workaround is to use indirect register accesses
10756                  * for all chip writes not to mailbox registers.
10757                  */
10758                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10759                         u32 pm_reg;
10760
10761                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10762
10763                         /* The chip can have it's power management PCI config
10764                          * space registers clobbered due to this bug.
10765                          * So explicitly force the chip into D0 here.
10766                          */
10767                         pci_read_config_dword(tp->pdev,
10768                                               tp->pm_cap + PCI_PM_CTRL,
10769                                               &pm_reg);
10770                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10771                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10772                         pci_write_config_dword(tp->pdev,
10773                                                tp->pm_cap + PCI_PM_CTRL,
10774                                                pm_reg);
10775
10776                         /* Also, force SERR#/PERR# in PCI command. */
10777                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10778                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10779                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10780                 }
10781         }
10782
10783         /* 5700 BX chips need to have their TX producer index mailboxes
10784          * written twice to workaround a bug.
10785          */
10786         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10787                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10788
10789         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10790                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10791         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10792                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10793
10794         /* Chip-specific fixup from Broadcom driver */
10795         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10796             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10797                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10798                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10799         }
10800
10801         /* Default fast path register access methods */
10802         tp->read32 = tg3_read32;
10803         tp->write32 = tg3_write32;
10804         tp->read32_mbox = tg3_read32;
10805         tp->write32_mbox = tg3_write32;
10806         tp->write32_tx_mbox = tg3_write32;
10807         tp->write32_rx_mbox = tg3_write32;
10808
10809         /* Various workaround register access methods */
10810         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10811                 tp->write32 = tg3_write_indirect_reg32;
10812         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10813                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10814                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
10815                 /*
10816                  * Back to back register writes can cause problems on these
10817                  * chips, the workaround is to read back all reg writes
10818                  * except those to mailbox regs.
10819                  *
10820                  * See tg3_write_indirect_reg32().
10821                  */
10822                 tp->write32 = tg3_write_flush_reg32;
10823         }
10824
10825
10826         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10827             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10828                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10829                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10830                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10831         }
10832
10833         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10834                 tp->read32 = tg3_read_indirect_reg32;
10835                 tp->write32 = tg3_write_indirect_reg32;
10836                 tp->read32_mbox = tg3_read_indirect_mbox;
10837                 tp->write32_mbox = tg3_write_indirect_mbox;
10838                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10839                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10840
10841                 iounmap(tp->regs);
10842                 tp->regs = NULL;
10843
10844                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10845                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10846                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10847         }
10848         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10849                 tp->read32_mbox = tg3_read32_mbox_5906;
10850                 tp->write32_mbox = tg3_write32_mbox_5906;
10851                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
10852                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
10853         }
10854
10855         if (tp->write32 == tg3_write_indirect_reg32 ||
10856             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10857              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10858               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10859                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10860
10861         /* Get eeprom hw config before calling tg3_set_power_state().
10862          * In particular, the TG3_FLG2_IS_NIC flag must be
10863          * determined before calling tg3_set_power_state() so that
10864          * we know whether or not to switch out of Vaux power.
10865          * When the flag is set, it means that GPIO1 is used for eeprom
10866          * write protect and also implies that it is a LOM where GPIOs
10867          * are not used to switch power.
10868          */
10869         tg3_get_eeprom_hw_cfg(tp);
10870
10871         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10872          * GPIO1 driven high will bring 5700's external PHY out of reset.
10873          * It is also used as eeprom write protect on LOMs.
10874          */
10875         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10876         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10877             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10878                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10879                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10880         /* Unused GPIO3 must be driven as output on 5752 because there
10881          * are no pull-up resistors on unused GPIO pins.
10882          */
10883         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10884                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10885
10886         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10887                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10888
10889         /* Force the chip into D0. */
10890         err = tg3_set_power_state(tp, PCI_D0);
10891         if (err) {
10892                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10893                        pci_name(tp->pdev));
10894                 return err;
10895         }
10896
10897         /* 5700 B0 chips do not support checksumming correctly due
10898          * to hardware bugs.
10899          */
10900         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10901                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10902
10903         /* Derive initial jumbo mode from MTU assigned in
10904          * ether_setup() via the alloc_etherdev() call
10905          */
10906         if (tp->dev->mtu > ETH_DATA_LEN &&
10907             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10908                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10909
10910         /* Determine WakeOnLan speed to use. */
10911         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10912             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10913             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10914             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10915                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10916         } else {
10917                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10918         }
10919
10920         /* A few boards don't want Ethernet@WireSpeed phy feature */
10921         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10922             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10923              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10924              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10925             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
10926             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10927                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10928
10929         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10930             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10931                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10932         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10933                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10934
10935         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10936                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10937                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10938                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
10939                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
10940                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10941                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
10942                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
10943                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10944                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10945         }
10946
10947         tp->coalesce_mode = 0;
10948         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10949             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10950                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10951
10952         /* Initialize MAC MI mode, polling disabled. */
10953         tw32_f(MAC_MI_MODE, tp->mi_mode);
10954         udelay(80);
10955
10956         /* Initialize data/descriptor byte/word swapping. */
10957         val = tr32(GRC_MODE);
10958         val &= GRC_MODE_HOST_STACKUP;
10959         tw32(GRC_MODE, val | tp->grc_mode);
10960
10961         tg3_switch_clocks(tp);
10962
10963         /* Clear this out for sanity. */
10964         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10965
10966         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10967                               &pci_state_reg);
10968         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10969             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10970                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10971
10972                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10973                     chiprevid == CHIPREV_ID_5701_B0 ||
10974                     chiprevid == CHIPREV_ID_5701_B2 ||
10975                     chiprevid == CHIPREV_ID_5701_B5) {
10976                         void __iomem *sram_base;
10977
10978                         /* Write some dummy words into the SRAM status block
10979                          * area, see if it reads back correctly.  If the return
10980                          * value is bad, force enable the PCIX workaround.
10981                          */
10982                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10983
10984                         writel(0x00000000, sram_base);
10985                         writel(0x00000000, sram_base + 4);
10986                         writel(0xffffffff, sram_base + 4);
10987                         if (readl(sram_base) != 0x00000000)
10988                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10989                 }
10990         }
10991
10992         udelay(50);
10993         tg3_nvram_init(tp);
10994
10995         grc_misc_cfg = tr32(GRC_MISC_CFG);
10996         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10997
10998         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10999             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11000              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11001                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11002
11003         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11004             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11005                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11006         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11007                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11008                                       HOSTCC_MODE_CLRTICK_TXBD);
11009
11010                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11011                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11012                                        tp->misc_host_ctrl);
11013         }
11014
11015         /* these are limited to 10/100 only */
11016         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11017              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11018             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11019              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11020              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11021               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11022               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11023             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11024              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11025               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11026               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11027             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11028                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11029
11030         err = tg3_phy_probe(tp);
11031         if (err) {
11032                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11033                        pci_name(tp->pdev), err);
11034                 /* ... but do not return immediately ... */
11035         }
11036
11037         tg3_read_partno(tp);
11038         tg3_read_fw_ver(tp);
11039
11040         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11041                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11042         } else {
11043                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11044                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11045                 else
11046                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11047         }
11048
11049         /* 5700 {AX,BX} chips have a broken status block link
11050          * change bit implementation, so we must use the
11051          * status register in those cases.
11052          */
11053         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11054                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11055         else
11056                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11057
11058         /* The led_ctrl is set during tg3_phy_probe, here we might
11059          * have to force the link status polling mechanism based
11060          * upon subsystem IDs.
11061          */
11062         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11063             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11064             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11065                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11066                                   TG3_FLAG_USE_LINKCHG_REG);
11067         }
11068
11069         /* For all SERDES we poll the MAC status register. */
11070         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11071                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11072         else
11073                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11074
11075         /* All chips before 5787 can get confused if TX buffers
11076          * straddle the 4GB address boundary in some cases.
11077          */
11078         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11079             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11080             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11081                 tp->dev->hard_start_xmit = tg3_start_xmit;
11082         else
11083                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11084
11085         tp->rx_offset = 2;
11086         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11087             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11088                 tp->rx_offset = 0;
11089
11090         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11091
11092         /* Increment the rx prod index on the rx std ring by at most
11093          * 8 for these chips to workaround hw errata.
11094          */
11095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11096             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11097             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11098                 tp->rx_std_max_post = 8;
11099
11100         /* By default, disable wake-on-lan.  User can change this
11101          * using ETHTOOL_SWOL.
11102          */
11103         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
11104
11105         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11106                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11107                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11108
11109         return err;
11110 }
11111
11112 #ifdef CONFIG_SPARC
11113 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11114 {
11115         struct net_device *dev = tp->dev;
11116         struct pci_dev *pdev = tp->pdev;
11117         struct device_node *dp = pci_device_to_OF_node(pdev);
11118         const unsigned char *addr;
11119         int len;
11120
11121         addr = of_get_property(dp, "local-mac-address", &len);
11122         if (addr && len == 6) {
11123                 memcpy(dev->dev_addr, addr, 6);
11124                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11125                 return 0;
11126         }
11127         return -ENODEV;
11128 }
11129
11130 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11131 {
11132         struct net_device *dev = tp->dev;
11133
11134         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11135         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11136         return 0;
11137 }
11138 #endif
11139
11140 static int __devinit tg3_get_device_address(struct tg3 *tp)
11141 {
11142         struct net_device *dev = tp->dev;
11143         u32 hi, lo, mac_offset;
11144         int addr_ok = 0;
11145
11146 #ifdef CONFIG_SPARC
11147         if (!tg3_get_macaddr_sparc(tp))
11148                 return 0;
11149 #endif
11150
11151         mac_offset = 0x7c;
11152         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11153             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11154                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11155                         mac_offset = 0xcc;
11156                 if (tg3_nvram_lock(tp))
11157                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11158                 else
11159                         tg3_nvram_unlock(tp);
11160         }
11161         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11162                 mac_offset = 0x10;
11163
11164         /* First try to get it from MAC address mailbox. */
11165         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11166         if ((hi >> 16) == 0x484b) {
11167                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11168                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11169
11170                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11171                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11172                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11173                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11174                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11175
11176                 /* Some old bootcode may report a 0 MAC address in SRAM */
11177                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11178         }
11179         if (!addr_ok) {
11180                 /* Next, try NVRAM. */
11181                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11182                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11183                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11184                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11185                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11186                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11187                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11188                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11189                 }
11190                 /* Finally just fetch it out of the MAC control regs. */
11191                 else {
11192                         hi = tr32(MAC_ADDR_0_HIGH);
11193                         lo = tr32(MAC_ADDR_0_LOW);
11194
11195                         dev->dev_addr[5] = lo & 0xff;
11196                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11197                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11198                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11199                         dev->dev_addr[1] = hi & 0xff;
11200                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11201                 }
11202         }
11203
11204         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11205 #ifdef CONFIG_SPARC64
11206                 if (!tg3_get_default_macaddr_sparc(tp))
11207                         return 0;
11208 #endif
11209                 return -EINVAL;
11210         }
11211         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11212         return 0;
11213 }
11214
11215 #define BOUNDARY_SINGLE_CACHELINE       1
11216 #define BOUNDARY_MULTI_CACHELINE        2
11217
11218 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11219 {
11220         int cacheline_size;
11221         u8 byte;
11222         int goal;
11223
11224         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11225         if (byte == 0)
11226                 cacheline_size = 1024;
11227         else
11228                 cacheline_size = (int) byte * 4;
11229
11230         /* On 5703 and later chips, the boundary bits have no
11231          * effect.
11232          */
11233         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11234             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11235             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11236                 goto out;
11237
11238 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11239         goal = BOUNDARY_MULTI_CACHELINE;
11240 #else
11241 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11242         goal = BOUNDARY_SINGLE_CACHELINE;
11243 #else
11244         goal = 0;
11245 #endif
11246 #endif
11247
11248         if (!goal)
11249                 goto out;
11250
11251         /* PCI controllers on most RISC systems tend to disconnect
11252          * when a device tries to burst across a cache-line boundary.
11253          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11254          *
11255          * Unfortunately, for PCI-E there are only limited
11256          * write-side controls for this, and thus for reads
11257          * we will still get the disconnects.  We'll also waste
11258          * these PCI cycles for both read and write for chips
11259          * other than 5700 and 5701 which do not implement the
11260          * boundary bits.
11261          */
11262         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11263             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11264                 switch (cacheline_size) {
11265                 case 16:
11266                 case 32:
11267                 case 64:
11268                 case 128:
11269                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11270                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11271                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11272                         } else {
11273                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11274                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11275                         }
11276                         break;
11277
11278                 case 256:
11279                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11280                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11281                         break;
11282
11283                 default:
11284                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11285                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11286                         break;
11287                 };
11288         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11289                 switch (cacheline_size) {
11290                 case 16:
11291                 case 32:
11292                 case 64:
11293                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11294                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11295                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11296                                 break;
11297                         }
11298                         /* fallthrough */
11299                 case 128:
11300                 default:
11301                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11302                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11303                         break;
11304                 };
11305         } else {
11306                 switch (cacheline_size) {
11307                 case 16:
11308                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11309                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11310                                         DMA_RWCTRL_WRITE_BNDRY_16);
11311                                 break;
11312                         }
11313                         /* fallthrough */
11314                 case 32:
11315                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11316                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11317                                         DMA_RWCTRL_WRITE_BNDRY_32);
11318                                 break;
11319                         }
11320                         /* fallthrough */
11321                 case 64:
11322                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11323                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11324                                         DMA_RWCTRL_WRITE_BNDRY_64);
11325                                 break;
11326                         }
11327                         /* fallthrough */
11328                 case 128:
11329                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11330                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11331                                         DMA_RWCTRL_WRITE_BNDRY_128);
11332                                 break;
11333                         }
11334                         /* fallthrough */
11335                 case 256:
11336                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11337                                 DMA_RWCTRL_WRITE_BNDRY_256);
11338                         break;
11339                 case 512:
11340                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11341                                 DMA_RWCTRL_WRITE_BNDRY_512);
11342                         break;
11343                 case 1024:
11344                 default:
11345                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11346                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11347                         break;
11348                 };
11349         }
11350
11351 out:
11352         return val;
11353 }
11354
/* Run one DMA transaction between host buffer @buf (bus address
 * @buf_dma, @size bytes) and NIC SRAM at 0x2100, driven by a test
 * descriptor written into the chip's DMA descriptor pool.
 * @to_device non-zero means host-to-card (read DMA engine),
 * zero means card-to-host (write DMA engine).
 * Returns 0 when the completion FIFO reports the descriptor,
 * -ENODEV on timeout (~4 ms).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Drain completion FIFOs and clear DMA engine status so we
         * start the test from a clean state.
         */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Build the test descriptor: host bus address split into
         * hi/lo halves, NIC-side buffer at SRAM offset 0x2100.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* Presumably completion/submission queue IDs for the
                 * read DMA path -- NOTE(review): confirm against the
                 * Tigon3 programming docs.
                 */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor word-by-word into NIC SRAM through the
         * PCI config-space memory window.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        /* Close the memory window for sanity. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick off the DMA by enqueuing the descriptor address. */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll the completion FIFO: 40 iterations x 100us = ~4 ms. */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
11435
11436 #define TEST_BUFFER_SIZE        0x2000
11437
11438 static int __devinit tg3_test_dma(struct tg3 *tp)
11439 {
11440         dma_addr_t buf_dma;
11441         u32 *buf, saved_dma_rwctrl;
11442         int ret;
11443
11444         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11445         if (!buf) {
11446                 ret = -ENOMEM;
11447                 goto out_nofree;
11448         }
11449
11450         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11451                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11452
11453         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11454
11455         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11456                 /* DMA read watermark not used on PCIE */
11457                 tp->dma_rwctrl |= 0x00180000;
11458         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11459                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11460                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11461                         tp->dma_rwctrl |= 0x003f0000;
11462                 else
11463                         tp->dma_rwctrl |= 0x003f000f;
11464         } else {
11465                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11466                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11467                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11468                         u32 read_water = 0x7;
11469
11470                         /* If the 5704 is behind the EPB bridge, we can
11471                          * do the less restrictive ONE_DMA workaround for
11472                          * better performance.
11473                          */
11474                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11475                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11476                                 tp->dma_rwctrl |= 0x8000;
11477                         else if (ccval == 0x6 || ccval == 0x7)
11478                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11479
11480                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
11481                                 read_water = 4;
11482                         /* Set bit 23 to enable PCIX hw bug fix */
11483                         tp->dma_rwctrl |=
11484                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
11485                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
11486                                 (1 << 23);
11487                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11488                         /* 5780 always in PCIX mode */
11489                         tp->dma_rwctrl |= 0x00144000;
11490                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11491                         /* 5714 always in PCIX mode */
11492                         tp->dma_rwctrl |= 0x00148000;
11493                 } else {
11494                         tp->dma_rwctrl |= 0x001b000f;
11495                 }
11496         }
11497
11498         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11499             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11500                 tp->dma_rwctrl &= 0xfffffff0;
11501
11502         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11503             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11504                 /* Remove this if it causes problems for some boards. */
11505                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11506
11507                 /* On 5700/5701 chips, we need to set this bit.
11508                  * Otherwise the chip will issue cacheline transactions
11509                  * to streamable DMA memory with not all the byte
11510                  * enables turned on.  This is an error on several
11511                  * RISC PCI controllers, in particular sparc64.
11512                  *
11513                  * On 5703/5704 chips, this bit has been reassigned
11514                  * a different meaning.  In particular, it is used
11515                  * on those chips to enable a PCI-X workaround.
11516                  */
11517                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11518         }
11519
11520         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11521
11522 #if 0
11523         /* Unneeded, already done by tg3_get_invariants.  */
11524         tg3_switch_clocks(tp);
11525 #endif
11526
11527         ret = 0;
11528         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11529             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11530                 goto out;
11531
11532         /* It is best to perform DMA test with maximum write burst size
11533          * to expose the 5700/5701 write DMA bug.
11534          */
11535         saved_dma_rwctrl = tp->dma_rwctrl;
11536         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11537         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11538
11539         while (1) {
11540                 u32 *p = buf, i;
11541
11542                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11543                         p[i] = i;
11544
11545                 /* Send the buffer to the chip. */
11546                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11547                 if (ret) {
11548                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11549                         break;
11550                 }
11551
11552 #if 0
11553                 /* validate data reached card RAM correctly. */
11554                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11555                         u32 val;
11556                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11557                         if (le32_to_cpu(val) != p[i]) {
11558                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
11559                                 /* ret = -ENODEV here? */
11560                         }
11561                         p[i] = 0;
11562                 }
11563 #endif
11564                 /* Now read it back. */
11565                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11566                 if (ret) {
11567                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
11568
11569                         break;
11570                 }
11571
11572                 /* Verify it. */
11573                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11574                         if (p[i] == i)
11575                                 continue;
11576
11577                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11578                             DMA_RWCTRL_WRITE_BNDRY_16) {
11579                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11580                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11581                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11582                                 break;
11583                         } else {
11584                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11585                                 ret = -ENODEV;
11586                                 goto out;
11587                         }
11588                 }
11589
11590                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11591                         /* Success. */
11592                         ret = 0;
11593                         break;
11594                 }
11595         }
11596         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11597             DMA_RWCTRL_WRITE_BNDRY_16) {
11598                 static struct pci_device_id dma_wait_state_chipsets[] = {
11599                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11600                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11601                         { },
11602                 };
11603
11604                 /* DMA test passed without adjusting DMA boundary,
11605                  * now look for chipsets that are known to expose the
11606                  * DMA bug without failing the test.
11607                  */
11608                 if (pci_dev_present(dma_wait_state_chipsets)) {
11609                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11610                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11611                 }
11612                 else
11613                         /* Safe to use the calculated DMA boundary. */
11614                         tp->dma_rwctrl = saved_dma_rwctrl;
11615
11616                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11617         }
11618
11619 out:
11620         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11621 out_nofree:
11622         return ret;
11623 }
11624
11625 static void __devinit tg3_init_link_config(struct tg3 *tp)
11626 {
11627         tp->link_config.advertising =
11628                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11629                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11630                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11631                  ADVERTISED_Autoneg | ADVERTISED_MII);
11632         tp->link_config.speed = SPEED_INVALID;
11633         tp->link_config.duplex = DUPLEX_INVALID;
11634         tp->link_config.autoneg = AUTONEG_ENABLE;
11635         tp->link_config.active_speed = SPEED_INVALID;
11636         tp->link_config.active_duplex = DUPLEX_INVALID;
11637         tp->link_config.phy_is_low_power = 0;
11638         tp->link_config.orig_speed = SPEED_INVALID;
11639         tp->link_config.orig_duplex = DUPLEX_INVALID;
11640         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11641 }
11642
11643 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11644 {
11645         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11646                 tp->bufmgr_config.mbuf_read_dma_low_water =
11647                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11648                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11649                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11650                 tp->bufmgr_config.mbuf_high_water =
11651                         DEFAULT_MB_HIGH_WATER_5705;
11652                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11653                         tp->bufmgr_config.mbuf_mac_rx_low_water =
11654                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
11655                         tp->bufmgr_config.mbuf_high_water =
11656                                 DEFAULT_MB_HIGH_WATER_5906;
11657                 }
11658
11659                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11660                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11661                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11662                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11663                 tp->bufmgr_config.mbuf_high_water_jumbo =
11664                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11665         } else {
11666                 tp->bufmgr_config.mbuf_read_dma_low_water =
11667                         DEFAULT_MB_RDMA_LOW_WATER;
11668                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11669                         DEFAULT_MB_MACRX_LOW_WATER;
11670                 tp->bufmgr_config.mbuf_high_water =
11671                         DEFAULT_MB_HIGH_WATER;
11672
11673                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11674                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11675                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11676                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11677                 tp->bufmgr_config.mbuf_high_water_jumbo =
11678                         DEFAULT_MB_HIGH_WATER_JUMBO;
11679         }
11680
11681         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11682         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11683 }
11684
11685 static char * __devinit tg3_phy_string(struct tg3 *tp)
11686 {
11687         switch (tp->phy_id & PHY_ID_MASK) {
11688         case PHY_ID_BCM5400:    return "5400";
11689         case PHY_ID_BCM5401:    return "5401";
11690         case PHY_ID_BCM5411:    return "5411";
11691         case PHY_ID_BCM5701:    return "5701";
11692         case PHY_ID_BCM5703:    return "5703";
11693         case PHY_ID_BCM5704:    return "5704";
11694         case PHY_ID_BCM5705:    return "5705";
11695         case PHY_ID_BCM5750:    return "5750";
11696         case PHY_ID_BCM5752:    return "5752";
11697         case PHY_ID_BCM5714:    return "5714";
11698         case PHY_ID_BCM5780:    return "5780";
11699         case PHY_ID_BCM5755:    return "5755";
11700         case PHY_ID_BCM5787:    return "5787";
11701         case PHY_ID_BCM5756:    return "5722/5756";
11702         case PHY_ID_BCM5906:    return "5906";
11703         case PHY_ID_BCM8002:    return "8002/serdes";
11704         case 0:                 return "serdes";
11705         default:                return "unknown";
11706         };
11707 }
11708
11709 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11710 {
11711         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11712                 strcpy(str, "PCI Express");
11713                 return str;
11714         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11715                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11716
11717                 strcpy(str, "PCIX:");
11718
11719                 if ((clock_ctrl == 7) ||
11720                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11721                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11722                         strcat(str, "133MHz");
11723                 else if (clock_ctrl == 0)
11724                         strcat(str, "33MHz");
11725                 else if (clock_ctrl == 2)
11726                         strcat(str, "50MHz");
11727                 else if (clock_ctrl == 4)
11728                         strcat(str, "66MHz");
11729                 else if (clock_ctrl == 6)
11730                         strcat(str, "100MHz");
11731         } else {
11732                 strcpy(str, "PCI:");
11733                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11734                         strcat(str, "66MHz");
11735                 else
11736                         strcat(str, "33MHz");
11737         }
11738         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11739                 strcat(str, ":32-bit");
11740         else
11741                 strcat(str, ":64-bit");
11742         return str;
11743 }
11744
/* Locate the other PCI function of a dual-port device (e.g. 5704)
 * by scanning all eight functions of our own slot for a device that
 * is not ourselves.  Falls back to tp->pdev for single-port parts.
 *
 * NOTE(review): the returned pci_dev's refcount is deliberately NOT
 * held -- see the comment before the final pci_dev_put() below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* devnr: our device number with the function bits cleared. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference pci_get_slot() took on ourselves;
		 * pci_dev_put(NULL) for empty functions is a no-op.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
11772
11773 static void __devinit tg3_init_coal(struct tg3 *tp)
11774 {
11775         struct ethtool_coalesce *ec = &tp->coal;
11776
11777         memset(ec, 0, sizeof(*ec));
11778         ec->cmd = ETHTOOL_GCOALESCE;
11779         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11780         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11781         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11782         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11783         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11784         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11785         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11786         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11787         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11788
11789         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11790                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11791                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11792                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11793                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11794                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11795         }
11796
11797         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11798                 ec->rx_coalesce_usecs_irq = 0;
11799                 ec->tx_coalesce_usecs_irq = 0;
11800                 ec->stats_block_coalesce_usecs = 0;
11801         }
11802 }
11803
/* PCI probe callback: discover, configure, and register one Tigon3 NIC.
 *
 * The ordering here matters: PCI resources come first, then the
 * register mapping, chip invariants, DMA mask selection, feature
 * flags, MAC address, a DMA engine self-test, and finally netdev
 * registration.  Each failure path unwinds exactly what was set up
 * before it via the err_out_* label chain at the bottom.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner only once, on the first probe. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	/* alloc_etherdev() zeroes the private area, so all tg3 fields
	 * not explicitly set below start out 0.
	 */
	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map the register window (BAR 0). */
	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the netdev callbacks (pre-net_device_ops style). */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Read chip revision, flags, and quirk information; this must
	 * precede the DMA mask and feature decisions below.
	 */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask failed. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO is always capable, while
	 * firmware TSO is disabled on chips/configurations known to be
	 * problematic (5700/5701, 5705 A0, 5906, or when ASF firmware
	 * is active).
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
	}


	/* 5705 A1 on a slow bus without TSO: shrink the RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	/* Exercise the DMA engine and tune tp->dma_rwctrl. */
	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* drvdata must be set before register_netdev() since callbacks
	 * can fire as soon as the device is registered.
	 */
	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Probe banner: part/revision/PHY/bus/link-type summary. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
	        ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
	         "10/100/1000Base-T")));

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwind: each label releases one more layer of setup. */
err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
12113
12114 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12115 {
12116         struct net_device *dev = pci_get_drvdata(pdev);
12117
12118         if (dev) {
12119                 struct tg3 *tp = netdev_priv(dev);
12120
12121                 flush_scheduled_work();
12122                 unregister_netdev(dev);
12123                 if (tp->regs) {
12124                         iounmap(tp->regs);
12125                         tp->regs = NULL;
12126                 }
12127                 free_netdev(dev);
12128                 pci_release_regions(pdev);
12129                 pci_disable_device(pdev);
12130                 pci_set_drvdata(pdev, NULL);
12131         }
12132 }
12133
/* Legacy PM suspend hook: quiesce the interface and drop the chip to
 * the requested low-power state.  If the power transition fails, the
 * device is restarted and reattached so the system stays usable, and
 * the error is returned to the PM core.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Finish any pending reset task before stopping the interface. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	/* Force a full re-initialization on resume. */
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the device back up.
		 * If even the restart fails, we still return the original
		 * suspend error (hw restart error is not propagated).
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12185
/* Legacy PM resume hook: restore PCI config state, power the chip
 * back up to D0, and restart the interface if it was running when
 * tg3_suspend() was called.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	/* Hardware bug - MSI won't work if INTX disabled. */
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		pci_intx(tp->pdev, 1);

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	/* Re-initialize the hardware from scratch; the INIT_COMPLETE
	 * flag was cleared during suspend.
	 */
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer stopped in tg3_suspend(). */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12225
/* PCI driver glue: probe/remove plus the legacy suspend/resume
 * power-management hooks.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12234
/* Module init: register with the PCI core; per-device setup happens
 * in tg3_init_one() as matching devices are found.
 */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
12239
/* Module exit: unregister the driver, which invokes tg3_remove_one()
 * for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
12244
/* Module entry/exit points. */
module_init(tg3_init);
module_exit(tg3_cleanup);