1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.85"
68 #define DRV_MODULE_RELDATE      "October 18, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
135 static char version[] __devinitdata =
136         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147 static struct pci_device_id tg3_pci_tbl[] = {
148         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
206         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
213         {}
214 };
215
216 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
218 static const struct {
219         const char string[ETH_GSTRING_LEN];
220 } ethtool_stats_keys[TG3_NUM_STATS] = {
221         { "rx_octets" },
222         { "rx_fragments" },
223         { "rx_ucast_packets" },
224         { "rx_mcast_packets" },
225         { "rx_bcast_packets" },
226         { "rx_fcs_errors" },
227         { "rx_align_errors" },
228         { "rx_xon_pause_rcvd" },
229         { "rx_xoff_pause_rcvd" },
230         { "rx_mac_ctrl_rcvd" },
231         { "rx_xoff_entered" },
232         { "rx_frame_too_long_errors" },
233         { "rx_jabbers" },
234         { "rx_undersize_packets" },
235         { "rx_in_length_errors" },
236         { "rx_out_length_errors" },
237         { "rx_64_or_less_octet_packets" },
238         { "rx_65_to_127_octet_packets" },
239         { "rx_128_to_255_octet_packets" },
240         { "rx_256_to_511_octet_packets" },
241         { "rx_512_to_1023_octet_packets" },
242         { "rx_1024_to_1522_octet_packets" },
243         { "rx_1523_to_2047_octet_packets" },
244         { "rx_2048_to_4095_octet_packets" },
245         { "rx_4096_to_8191_octet_packets" },
246         { "rx_8192_to_9022_octet_packets" },
247
248         { "tx_octets" },
249         { "tx_collisions" },
250
251         { "tx_xon_sent" },
252         { "tx_xoff_sent" },
253         { "tx_flow_control" },
254         { "tx_mac_errors" },
255         { "tx_single_collisions" },
256         { "tx_mult_collisions" },
257         { "tx_deferred" },
258         { "tx_excessive_collisions" },
259         { "tx_late_collisions" },
260         { "tx_collide_2times" },
261         { "tx_collide_3times" },
262         { "tx_collide_4times" },
263         { "tx_collide_5times" },
264         { "tx_collide_6times" },
265         { "tx_collide_7times" },
266         { "tx_collide_8times" },
267         { "tx_collide_9times" },
268         { "tx_collide_10times" },
269         { "tx_collide_11times" },
270         { "tx_collide_12times" },
271         { "tx_collide_13times" },
272         { "tx_collide_14times" },
273         { "tx_collide_15times" },
274         { "tx_ucast_packets" },
275         { "tx_mcast_packets" },
276         { "tx_bcast_packets" },
277         { "tx_carrier_sense_errors" },
278         { "tx_discards" },
279         { "tx_errors" },
280
281         { "dma_writeq_full" },
282         { "dma_write_prioq_full" },
283         { "rxbds_empty" },
284         { "rx_discards" },
285         { "rx_errors" },
286         { "rx_threshold_hit" },
287
288         { "dma_readq_full" },
289         { "dma_read_prioq_full" },
290         { "tx_comp_queue_full" },
291
292         { "ring_set_send_prod_index" },
293         { "ring_status_update" },
294         { "nic_irqs" },
295         { "nic_avoided_irqs" },
296         { "nic_tx_threshold_hit" }
297 };
298
299 static const struct {
300         const char string[ETH_GSTRING_LEN];
301 } ethtool_test_keys[TG3_NUM_TEST] = {
302         { "nvram test     (online) " },
303         { "link test      (online) " },
304         { "register test  (offline)" },
305         { "memory test    (offline)" },
306         { "loopback test  (offline)" },
307         { "interrupt test (offline)" },
308 };
309
310 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
311 {
312         writel(val, tp->regs + off);
313 }
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
320 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
321 {
322         writel(val, tp->aperegs + off);
323 }
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
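/* Indirect register access: on chips where direct MMIO is unsafe (e.g. the
 * PCI-X target hardware bug or ICH workarounds), register reads and writes
 * are bounced through the PCI config window TG3PCI_REG_BASE_ADDR /
 * TG3PCI_REG_DATA, serialized by indirect_lock.
 */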
330 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
331 {
332         unsigned long flags;
333
334         spin_lock_irqsave(&tp->indirect_lock, flags);
335         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
336         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
337         spin_unlock_irqrestore(&tp->indirect_lock, flags);
338 }
339
340 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
341 {
342         writel(val, tp->regs + off);
343         readl(tp->regs + off);
344 }
345
346 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
347 {
348         unsigned long flags;
349         u32 val;
350
351         spin_lock_irqsave(&tp->indirect_lock, flags);
352         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
353         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
354         spin_unlock_irqrestore(&tp->indirect_lock, flags);
355         return val;
356 }
357
358 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
359 {
360         unsigned long flags;
361
362         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
363                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
364                                        TG3_64BIT_REG_LOW, val);
365                 return;
366         }
367         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
368                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
369                                        TG3_64BIT_REG_LOW, val);
370                 return;
371         }
372
373         spin_lock_irqsave(&tp->indirect_lock, flags);
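        /* 0x5600 appears to correspond to GRCMBOX_BASE: the low-priority
         * mailboxes are mirrored at that offset in the register space for
         * indirect (config-space) access.
         */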
374         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
375         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
376         spin_unlock_irqrestore(&tp->indirect_lock, flags);
377
378         /* In indirect mode when disabling interrupts, we also need
379          * to clear the interrupt bit in the GRC local ctrl register.
380          */
381         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
382             (val == 0x1)) {
383                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
384                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
385         }
386 }
387
388 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
389 {
390         unsigned long flags;
391         u32 val;
392
393         spin_lock_irqsave(&tp->indirect_lock, flags);
394         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
395         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396         spin_unlock_irqrestore(&tp->indirect_lock, flags);
397         return val;
398 }
399
400 /* usec_wait specifies the wait time in usec when writing to certain registers
401  * where it is unsafe to read back the register without some delay.
402  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
404  */
405 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
406 {
407         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
408             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
409                 /* Non-posted methods */
410                 tp->write32(tp, off, val);
411         else {
412                 /* Posted method */
413                 tg3_write32(tp, off, val);
414                 if (usec_wait)
415                         udelay(usec_wait);
416                 tp->read32(tp, off);
417         }
418         /* Wait again after the read for the posted method to guarantee that
419          * the wait time is met.
420          */
421         if (usec_wait)
422                 udelay(usec_wait);
423 }
424
425 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
426 {
427         tp->write32_mbox(tp, off, val);
428         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
429             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430                 tp->read32_mbox(tp, off);
431 }
432
433 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
434 {
435         void __iomem *mbox = tp->regs + off;
436         writel(val, mbox);
437         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
438                 writel(val, mbox);
439         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
440                 readl(mbox);
441 }
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
448 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449 {
450         writel(val, tp->regs + off + GRCMBOX_BASE);
451 }
452
453 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
454 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
455 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
456 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
457 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
458
459 #define tw32(reg,val)           tp->write32(tp, reg, val)
460 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
461 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
462 #define tr32(reg)               tp->read32(tp, reg)
463
464 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
465 {
466         unsigned long flags;
467
468         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
469             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
470                 return;
471
472         spin_lock_irqsave(&tp->indirect_lock, flags);
473         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
474                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
475                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
476
477                 /* Always leave this as zero. */
478                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
479         } else {
480                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
481                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
482
483                 /* Always leave this as zero. */
484                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
485         }
486         spin_unlock_irqrestore(&tp->indirect_lock, flags);
487 }
488
489 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
490 {
491         unsigned long flags;
492
493         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
494             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
495                 *val = 0;
496                 return;
497         }
498
499         spin_lock_irqsave(&tp->indirect_lock, flags);
500         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
501                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
502                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
503
504                 /* Always leave this as zero. */
505                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
506         } else {
507                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
508                 *val = tr32(TG3PCI_MEM_WIN_DATA);
509
510                 /* Always leave this as zero. */
511                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
512         }
513         spin_unlock_irqrestore(&tp->indirect_lock, flags);
514 }
515
516 static void tg3_ape_lock_init(struct tg3 *tp)
517 {
518         int i;
519
520         /* Make sure the driver hasn't any stale locks. */
521         for (i = 0; i < 8; i++)
522                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523                                 APE_LOCK_GRANT_DRIVER);
524 }
525
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
527 {
528         int i, off;
529         int ret = 0;
530         u32 status;
531
532         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533                 return 0;
534
535         switch (locknum) {
536                 case TG3_APE_LOCK_MEM:
537                         break;
538                 default:
539                         return -EINVAL;
540         }
541
542         off = 4 * locknum;
543
544         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546         /* Wait for up to 1 millisecond to acquire lock. */
547         for (i = 0; i < 100; i++) {
548                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549                 if (status == APE_LOCK_GRANT_DRIVER)
550                         break;
551                 udelay(10);
552         }
553
554         if (status != APE_LOCK_GRANT_DRIVER) {
555                 /* Revoke the lock request. */
556                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557                                 APE_LOCK_GRANT_DRIVER);
558
559                 ret = -EBUSY;
560         }
561
562         return ret;
563 }
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
583 static void tg3_disable_ints(struct tg3 *tp)
584 {
585         tw32(TG3PCI_MISC_HOST_CTRL,
586              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
587         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
588 }
589
590 static inline void tg3_cond_int(struct tg3 *tp)
591 {
592         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
593             (tp->hw_status->status & SD_STATUS_UPDATED))
594                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
595         else
596                 tw32(HOSTCC_MODE, tp->coalesce_mode |
597                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
598 }
599
600 static void tg3_enable_ints(struct tg3 *tp)
601 {
602         tp->irq_sync = 0;
603         wmb();
604
605         tw32(TG3PCI_MISC_HOST_CTRL,
606              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
607         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
608                        (tp->last_tag << 24));
609         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
610                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
611                                (tp->last_tag << 24));
612         tg3_cond_int(tp);
613 }
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
635 /* tg3_restart_ints
636  *  similar to tg3_enable_ints, but it accurately determines whether there
637  *  is new work pending and can return without flushing the PIO write
638  *  which reenables interrupts
639  */
640 static void tg3_restart_ints(struct tg3 *tp)
641 {
642         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
643                      tp->last_tag << 24);
644         mmiowb();
645
646         /* When doing tagged status, this work check is unnecessary.
647          * The last_tag we write above tells the chip which piece of
648          * work we've completed.
649          */
650         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
651             tg3_has_work(tp))
652                 tw32(HOSTCC_MODE, tp->coalesce_mode |
653                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
654 }
655
656 static inline void tg3_netif_stop(struct tg3 *tp)
657 {
658         tp->dev->trans_start = jiffies; /* prevent tx timeout */
659         napi_disable(&tp->napi);
660         netif_tx_disable(tp->dev);
661 }
662
663 static inline void tg3_netif_start(struct tg3 *tp)
664 {
665         netif_wake_queue(tp->dev);
666         /* NOTE: unconditional netif_wake_queue is only appropriate
667          * so long as all callers are assured to have free tx slots
668          * (such as after tg3_init_hw)
669          */
670         napi_enable(&tp->napi);
671         tp->hw_status->status |= SD_STATUS_UPDATED;
672         tg3_enable_ints(tp);
673 }
674
675 static void tg3_switch_clocks(struct tg3 *tp)
676 {
677         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
678         u32 orig_clock_ctrl;
679
680         if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
681             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
682                 return;
683
684         orig_clock_ctrl = clock_ctrl;
685         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
686                        CLOCK_CTRL_CLKRUN_OENABLE |
687                        0x1f);
688         tp->pci_clock_ctrl = clock_ctrl;
689
690         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
691                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
692                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
693                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
694                 }
695         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
696                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
697                             clock_ctrl |
698                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
699                             40);
700                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
701                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
702                             40);
703         }
704         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
705 }
706
707 #define PHY_BUSY_LOOPS  5000
708
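/* MII management (MDIO) access: build a frame in MAC_MI_COM, start the
 * transaction, then poll the BUSY bit for up to PHY_BUSY_LOOPS iterations,
 * roughly 10 usec apart.  Hardware auto-polling is paused around the access
 * and restored afterwards.
 */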
709 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
710 {
711         u32 frame_val;
712         unsigned int loops;
713         int ret;
714
715         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
716                 tw32_f(MAC_MI_MODE,
717                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
718                 udelay(80);
719         }
720
721         *val = 0x0;
722
723         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
724                       MI_COM_PHY_ADDR_MASK);
725         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
726                       MI_COM_REG_ADDR_MASK);
727         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
728
729         tw32_f(MAC_MI_COM, frame_val);
730
731         loops = PHY_BUSY_LOOPS;
732         while (loops != 0) {
733                 udelay(10);
734                 frame_val = tr32(MAC_MI_COM);
735
736                 if ((frame_val & MI_COM_BUSY) == 0) {
737                         udelay(5);
738                         frame_val = tr32(MAC_MI_COM);
739                         break;
740                 }
741                 loops -= 1;
742         }
743
744         ret = -EBUSY;
745         if (loops != 0) {
746                 *val = frame_val & MI_COM_DATA_MASK;
747                 ret = 0;
748         }
749
750         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
751                 tw32_f(MAC_MI_MODE, tp->mi_mode);
752                 udelay(80);
753         }
754
755         return ret;
756 }
757
758 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
759 {
760         u32 frame_val;
761         unsigned int loops;
762         int ret;
763
764         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
765             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
766                 return 0;
767
768         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
769                 tw32_f(MAC_MI_MODE,
770                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
771                 udelay(80);
772         }
773
774         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
775                       MI_COM_PHY_ADDR_MASK);
776         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
777                       MI_COM_REG_ADDR_MASK);
778         frame_val |= (val & MI_COM_DATA_MASK);
779         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
780
781         tw32_f(MAC_MI_COM, frame_val);
782
783         loops = PHY_BUSY_LOOPS;
784         while (loops != 0) {
785                 udelay(10);
786                 frame_val = tr32(MAC_MI_COM);
787                 if ((frame_val & MI_COM_BUSY) == 0) {
788                         udelay(5);
789                         frame_val = tr32(MAC_MI_COM);
790                         break;
791                 }
792                 loops -= 1;
793         }
794
795         ret = -EBUSY;
796         if (loops != 0)
797                 ret = 0;
798
799         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
800                 tw32_f(MAC_MI_MODE, tp->mi_mode);
801                 udelay(80);
802         }
803
804         return ret;
805 }
806
807 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
808 {
809         u32 phy;
810
811         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
812             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
813                 return;
814
815         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
816                 u32 ephy;
817
818                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
819                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
820                                      ephy | MII_TG3_EPHY_SHADOW_EN);
821                         if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
822                                 if (enable)
823                                         phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
824                                 else
825                                         phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
826                                 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
827                         }
828                         tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
829                 }
830         } else {
831                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
832                       MII_TG3_AUXCTL_SHDWSEL_MISC;
833                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
834                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
835                         if (enable)
836                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
837                         else
838                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
839                         phy |= MII_TG3_AUXCTL_MISC_WREN;
840                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
841                 }
842         }
843 }
844
845 static void tg3_phy_set_wirespeed(struct tg3 *tp)
846 {
847         u32 val;
848
849         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
850                 return;
851
852         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
853             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
854                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
855                              (val | (1 << 15) | (1 << 4)));
856 }
857
858 static int tg3_bmcr_reset(struct tg3 *tp)
859 {
860         u32 phy_control;
861         int limit, err;
862
863         /* OK, reset it, and poll the BMCR_RESET bit until it
864          * clears or we time out.
865          */
866         phy_control = BMCR_RESET;
867         err = tg3_writephy(tp, MII_BMCR, phy_control);
868         if (err != 0)
869                 return -EBUSY;
870
871         limit = 5000;
872         while (limit--) {
873                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874                 if (err != 0)
875                         return -EBUSY;
876
877                 if ((phy_control & BMCR_RESET) == 0) {
878                         udelay(40);
879                         break;
880                 }
881                 udelay(10);
882         }
883         if (limit <= 0)
884                 return -EBUSY;
885
886         return 0;
887 }
888
889 static int tg3_wait_macro_done(struct tg3 *tp)
890 {
891         int limit = 100;
892
893         while (limit--) {
894                 u32 tmp32;
895
896                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897                         if ((tmp32 & 0x1000) == 0)
898                                 break;
899                 }
900         }
901         if (limit <= 0)
902                 return -EBUSY;
903
904         return 0;
905 }
906
907 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
908 {
909         static const u32 test_pat[4][6] = {
910         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
911         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
912         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
913         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
914         };
915         int chan;
916
917         for (chan = 0; chan < 4; chan++) {
918                 int i;
919
920                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
921                              (chan * 0x2000) | 0x0200);
922                 tg3_writephy(tp, 0x16, 0x0002);
923
924                 for (i = 0; i < 6; i++)
925                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
926                                      test_pat[chan][i]);
927
928                 tg3_writephy(tp, 0x16, 0x0202);
929                 if (tg3_wait_macro_done(tp)) {
930                         *resetp = 1;
931                         return -EBUSY;
932                 }
933
934                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
935                              (chan * 0x2000) | 0x0200);
936                 tg3_writephy(tp, 0x16, 0x0082);
937                 if (tg3_wait_macro_done(tp)) {
938                         *resetp = 1;
939                         return -EBUSY;
940                 }
941
942                 tg3_writephy(tp, 0x16, 0x0802);
943                 if (tg3_wait_macro_done(tp)) {
944                         *resetp = 1;
945                         return -EBUSY;
946                 }
947
948                 for (i = 0; i < 6; i += 2) {
949                         u32 low, high;
950
951                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
952                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
953                             tg3_wait_macro_done(tp)) {
954                                 *resetp = 1;
955                                 return -EBUSY;
956                         }
957                         low &= 0x7fff;
958                         high &= 0x000f;
959                         if (low != test_pat[chan][i] ||
960                             high != test_pat[chan][i+1]) {
961                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
962                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
963                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
964
965                                 return -EBUSY;
966                         }
967                 }
968         }
969
970         return 0;
971 }
972
973 static int tg3_phy_reset_chanpat(struct tg3 *tp)
974 {
975         int chan;
976
977         for (chan = 0; chan < 4; chan++) {
978                 int i;
979
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
981                              (chan * 0x2000) | 0x0200);
982                 tg3_writephy(tp, 0x16, 0x0002);
983                 for (i = 0; i < 6; i++)
984                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
985                 tg3_writephy(tp, 0x16, 0x0202);
986                 if (tg3_wait_macro_done(tp))
987                         return -EBUSY;
988         }
989
990         return 0;
991 }
992
993 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
994 {
995         u32 reg32, phy9_orig;
996         int retries, do_phy_reset, err;
997
998         retries = 10;
999         do_phy_reset = 1;
1000         do {
1001                 if (do_phy_reset) {
1002                         err = tg3_bmcr_reset(tp);
1003                         if (err)
1004                                 return err;
1005                         do_phy_reset = 0;
1006                 }
1007
1008                 /* Disable transmitter and interrupt.  */
1009                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1010                         continue;
1011
1012                 reg32 |= 0x3000;
1013                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1014
1015                 /* Set full-duplex, 1000 mbps.  */
1016                 tg3_writephy(tp, MII_BMCR,
1017                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1018
1019                 /* Set to master mode.  */
1020                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1021                         continue;
1022
1023                 tg3_writephy(tp, MII_TG3_CTRL,
1024                              (MII_TG3_CTRL_AS_MASTER |
1025                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1026
1027                 /* Enable SM_DSP_CLOCK and 6dB.  */
1028                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1029
1030                 /* Block the PHY control access.  */
1031                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1032                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1033
1034                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1035                 if (!err)
1036                         break;
1037         } while (--retries);
1038
1039         err = tg3_phy_reset_chanpat(tp);
1040         if (err)
1041                 return err;
1042
1043         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1044         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1045
1046         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1047         tg3_writephy(tp, 0x16, 0x0000);
1048
1049         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1050             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1051                 /* Set Extended packet length bit for jumbo frames */
1052                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1053         }
1054         else {
1055                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1056         }
1057
1058         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1059
1060         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1061                 reg32 &= ~0x3000;
1062                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1063         } else if (!err)
1064                 err = -EBUSY;
1065
1066         return err;
1067 }
1068
1069 static void tg3_link_report(struct tg3 *);
1070
1071 /* This will reset the tigon3 PHY if there is no valid
1072  * link unless the FORCE argument is non-zero.
1073  */
1074 static int tg3_phy_reset(struct tg3 *tp)
1075 {
1076         u32 phy_status;
1077         int err;
1078
1079         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1080                 u32 val;
1081
1082                 val = tr32(GRC_MISC_CFG);
1083                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1084                 udelay(40);
1085         }
1086         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1087         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1088         if (err != 0)
1089                 return -EBUSY;
1090
1091         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1092                 netif_carrier_off(tp->dev);
1093                 tg3_link_report(tp);
1094         }
1095
1096         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1097             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1098             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1099                 err = tg3_phy_reset_5703_4_5(tp);
1100                 if (err)
1101                         return err;
1102                 goto out;
1103         }
1104
1105         err = tg3_bmcr_reset(tp);
1106         if (err)
1107                 return err;
1108
1109         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
1110             tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
1111                 u32 val;
1112
1113                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1114                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1115                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1116                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1117                         udelay(40);
1118                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1119                 }
1120
1121                 /* Disable GPHY autopowerdown. */
1122                 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1123                              MII_TG3_MISC_SHDW_WREN |
1124                              MII_TG3_MISC_SHDW_APD_SEL |
1125                              MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1126         }
1127
1128 out:
1129         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1130                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1131                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1132                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1133                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1134                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1135                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1136         }
1137         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1138                 tg3_writephy(tp, 0x1c, 0x8d68);
1139                 tg3_writephy(tp, 0x1c, 0x8d68);
1140         }
1141         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1142                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1143                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1144                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1145                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1146                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1147                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1148                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1149                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1150         }
1151         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1152                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1153                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1154                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1155                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1156                         tg3_writephy(tp, MII_TG3_TEST1,
1157                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1158                 } else
1159                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1160                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1161         }
1162         /* Set Extended packet length bit (bit 14) on all chips that */
1163         /* support jumbo frames */
1164         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1165                 /* Cannot do read-modify-write on 5401 */
1166                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1167         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1168                 u32 phy_reg;
1169
1170                 /* Set bit 14 with read-modify-write to preserve other bits */
1171                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1172                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1173                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1174         }
1175
1176         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1177          * jumbo frames transmission.
1178          */
1179         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1180                 u32 phy_reg;
1181
1182                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1183                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1184                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1185         }
1186
1187         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1188                 /* adjust output voltage */
1189                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1190         }
1191
1192         tg3_phy_toggle_automdix(tp, 1);
1193         tg3_phy_set_wirespeed(tp);
1194         return 0;
1195 }
1196
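/* Drive the GRC_LOCAL_CTRL GPIOs that control auxiliary (Vaux) power.
 * Whether aux power must stay up depends on the WOL/ASF configuration,
 * and on dual-port devices (5704/5714) also on the state of the peer
 * function.
 */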
1197 static void tg3_frob_aux_power(struct tg3 *tp)
1198 {
1199         struct tg3 *tp_peer = tp;
1200
1201         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1202                 return;
1203
1204         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1205             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1206                 struct net_device *dev_peer;
1207
1208                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1209                 /* remove_one() may have been run on the peer. */
1210                 if (!dev_peer)
1211                         tp_peer = tp;
1212                 else
1213                         tp_peer = netdev_priv(dev_peer);
1214         }
1215
1216         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1217             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1218             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1219             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1220                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1221                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1222                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1223                                     (GRC_LCLCTRL_GPIO_OE0 |
1224                                      GRC_LCLCTRL_GPIO_OE1 |
1225                                      GRC_LCLCTRL_GPIO_OE2 |
1226                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1227                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1228                                     100);
1229                 } else {
1230                         u32 no_gpio2;
1231                         u32 grc_local_ctrl = 0;
1232
1233                         if (tp_peer != tp &&
1234                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1235                                 return;
1236
1237                         /* Workaround to prevent overdrawing Amps. */
1238                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1239                             ASIC_REV_5714) {
1240                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1241                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1242                                             grc_local_ctrl, 100);
1243                         }
1244
1245                         /* On 5753 and variants, GPIO2 cannot be used. */
1246                         no_gpio2 = tp->nic_sram_data_cfg &
1247                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1248
1249                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1250                                          GRC_LCLCTRL_GPIO_OE1 |
1251                                          GRC_LCLCTRL_GPIO_OE2 |
1252                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1253                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1254                         if (no_gpio2) {
1255                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1256                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1257                         }
1258                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1259                                                     grc_local_ctrl, 100);
1260
1261                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1262
1263                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1264                                                     grc_local_ctrl, 100);
1265
1266                         if (!no_gpio2) {
1267                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1268                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1269                                             grc_local_ctrl, 100);
1270                         }
1271                 }
1272         } else {
1273                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1274                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1275                         if (tp_peer != tp &&
1276                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1277                                 return;
1278
1279                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1280                                     (GRC_LCLCTRL_GPIO_OE1 |
1281                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1282
1283                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1284                                     GRC_LCLCTRL_GPIO_OE1, 100);
1285
1286                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1287                                     (GRC_LCLCTRL_GPIO_OE1 |
1288                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1289                 }
1290         }
1291 }
1292
1293 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1294 {
1295         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1296                 return 1;
1297         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1298                 if (speed != SPEED_10)
1299                         return 1;
1300         } else if (speed == SPEED_10)
1301                 return 1;
1302
1303         return 0;
1304 }
1305
1306 static int tg3_setup_phy(struct tg3 *, int);
1307
1308 #define RESET_KIND_SHUTDOWN     0
1309 #define RESET_KIND_INIT         1
1310 #define RESET_KIND_SUSPEND      2
1311
1312 static void tg3_write_sig_post_reset(struct tg3 *, int);
1313 static int tg3_halt_cpu(struct tg3 *, u32);
1314 static int tg3_nvram_lock(struct tg3 *);
1315 static void tg3_nvram_unlock(struct tg3 *);
1316
1317 static void tg3_power_down_phy(struct tg3 *tp)
1318 {
1319         u32 val;
1320
1321         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1322                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1323                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1324                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1325
1326                         sg_dig_ctrl |=
1327                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1328                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1329                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1330                 }
1331                 return;
1332         }
1333
1334         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1335                 tg3_bmcr_reset(tp);
1336                 val = tr32(GRC_MISC_CFG);
1337                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1338                 udelay(40);
1339                 return;
1340         } else {
1341                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1342                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1343                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1344         }
1345
1346         /* The PHY should not be powered down on some chips because
1347          * of bugs.
1348          */
1349         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1350             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1351             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1352              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1353                 return;
1354
1355         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
1356             tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
1357                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1358                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1359                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1360                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1361         }
1362
1363         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1364 }
1365
1366 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1367 {
1368         u32 misc_host_ctrl;
1369         u16 power_control, power_caps;
1370         int pm = tp->pm_cap;
1371
1372         /* Make sure register accesses (indirect or otherwise)
1373          * will function correctly.
1374          */
1375         pci_write_config_dword(tp->pdev,
1376                                TG3PCI_MISC_HOST_CTRL,
1377                                tp->misc_host_ctrl);
1378
1379         pci_read_config_word(tp->pdev,
1380                              pm + PCI_PM_CTRL,
1381                              &power_control);
1382         power_control |= PCI_PM_CTRL_PME_STATUS;
1383         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1384         switch (state) {
1385         case PCI_D0:
1386                 power_control |= 0;
1387                 pci_write_config_word(tp->pdev,
1388                                       pm + PCI_PM_CTRL,
1389                                       power_control);
1390                 udelay(100);    /* Delay after power state change */
1391
1392                 /* Switch out of Vaux if it is a NIC */
1393                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1394                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1395
1396                 return 0;
1397
1398         case PCI_D1:
1399                 power_control |= 1;
1400                 break;
1401
1402         case PCI_D2:
1403                 power_control |= 2;
1404                 break;
1405
1406         case PCI_D3hot:
1407                 power_control |= 3;
1408                 break;
1409
1410         default:
1411                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1412                        "requested.\n",
1413                        tp->dev->name, state);
1414                 return -EINVAL;
1415         }
1416
1417         power_control |= PCI_PM_CTRL_PME_ENABLE;
1418
1419         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1420         tw32(TG3PCI_MISC_HOST_CTRL,
1421              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1422
1423         if (tp->link_config.phy_is_low_power == 0) {
1424                 tp->link_config.phy_is_low_power = 1;
1425                 tp->link_config.orig_speed = tp->link_config.speed;
1426                 tp->link_config.orig_duplex = tp->link_config.duplex;
1427                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1428         }
1429
1430         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1431                 tp->link_config.speed = SPEED_10;
1432                 tp->link_config.duplex = DUPLEX_HALF;
1433                 tp->link_config.autoneg = AUTONEG_ENABLE;
1434                 tg3_setup_phy(tp, 0);
1435         }
1436
1437         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1438                 u32 val;
1439
1440                 val = tr32(GRC_VCPU_EXT_CTRL);
1441                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1442         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1443                 int i;
1444                 u32 val;
1445
1446                 for (i = 0; i < 200; i++) {
1447                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1448                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1449                                 break;
1450                         msleep(1);
1451                 }
1452         }
1453         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1454                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1455                                                      WOL_DRV_STATE_SHUTDOWN |
1456                                                      WOL_DRV_WOL |
1457                                                      WOL_SET_MAGIC_PKT);
1458
1459         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1460
1461         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1462                 u32 mac_mode;
1463
1464                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1465                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1466                         udelay(40);
1467
1468                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1469                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1470                         else
1471                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1472
1473                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1474                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1475                             ASIC_REV_5700) {
1476                                 u32 speed = (tp->tg3_flags &
1477                                              TG3_FLAG_WOL_SPEED_100MB) ?
1478                                              SPEED_100 : SPEED_10;
1479                                 if (tg3_5700_link_polarity(tp, speed))
1480                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1481                                 else
1482                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1483                         }
1484                 } else {
1485                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1486                 }
1487
1488                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1489                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1490
1491                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1492                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1493                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1494
1495                 tw32_f(MAC_MODE, mac_mode);
1496                 udelay(100);
1497
1498                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1499                 udelay(10);
1500         }
1501
1502         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1503             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1504              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1505                 u32 base_val;
1506
1507                 base_val = tp->pci_clock_ctrl;
1508                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1509                              CLOCK_CTRL_TXCLK_DISABLE);
1510
1511                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1512                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1513         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1514                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1515                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1516                 /* do nothing */
1517         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1518                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1519                 u32 newbits1, newbits2;
1520
1521                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1522                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1523                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1524                                     CLOCK_CTRL_TXCLK_DISABLE |
1525                                     CLOCK_CTRL_ALTCLK);
1526                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1527                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1528                         newbits1 = CLOCK_CTRL_625_CORE;
1529                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1530                 } else {
1531                         newbits1 = CLOCK_CTRL_ALTCLK;
1532                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1533                 }
1534
1535                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1536                             40);
1537
1538                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1539                             40);
1540
1541                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1542                         u32 newbits3;
1543
1544                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1545                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1546                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1547                                             CLOCK_CTRL_TXCLK_DISABLE |
1548                                             CLOCK_CTRL_44MHZ_CORE);
1549                         } else {
1550                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1551                         }
1552
1553                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1554                                     tp->pci_clock_ctrl | newbits3, 40);
1555                 }
1556         }
1557
1558         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1559             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1560             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1561                 tg3_power_down_phy(tp);
1562
1563         tg3_frob_aux_power(tp);
1564
1565         /* Workaround for unstable PLL clock */
1566         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1567             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1568                 u32 val = tr32(0x7d00);
1569
1570                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1571                 tw32(0x7d00, val);
1572                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1573                         int err;
1574
1575                         err = tg3_nvram_lock(tp);
1576                         tg3_halt_cpu(tp, RX_CPU_BASE);
1577                         if (!err)
1578                                 tg3_nvram_unlock(tp);
1579                 }
1580         }
1581
1582         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1583
1584         /* Finally, set the new power state. */
1585         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1586         udelay(100);    /* Delay after power state change */
1587
1588         return 0;
1589 }
1590
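/* Log the current link state (up/down, speed, duplex, flow control),
 * gated by the netif_msg_link() message-level mask.
 */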
1591 static void tg3_link_report(struct tg3 *tp)
1592 {
1593         if (!netif_carrier_ok(tp->dev)) {
1594                 if (netif_msg_link(tp))
1595                         printk(KERN_INFO PFX "%s: Link is down.\n",
1596                                tp->dev->name);
1597         } else if (netif_msg_link(tp)) {
1598                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1599                        tp->dev->name,
1600                        (tp->link_config.active_speed == SPEED_1000 ?
1601                         1000 :
1602                         (tp->link_config.active_speed == SPEED_100 ?
1603                          100 : 10)),
1604                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1605                         "full" : "half"));
1606
1607                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1608                        "%s for RX.\n",
1609                        tp->dev->name,
1610                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1611                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1612         }
1613 }
1614
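/* Resolve TX/RX pause frame usage from the local and remote
 * advertisements and update MAC_RX_MODE / MAC_TX_MODE if the result
 * changed.
 */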
1615 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1616 {
1617         u32 new_tg3_flags = 0;
1618         u32 old_rx_mode = tp->rx_mode;
1619         u32 old_tx_mode = tp->tx_mode;
1620
1621         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1622
1623                 /* Convert 1000BaseX flow control bits to 1000BaseT
1624                  * bits before resolving flow control.
1625                  */
1626                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1627                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1628                                        ADVERTISE_PAUSE_ASYM);
1629                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1630
1631                         if (local_adv & ADVERTISE_1000XPAUSE)
1632                                 local_adv |= ADVERTISE_PAUSE_CAP;
1633                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1634                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1635                         if (remote_adv & LPA_1000XPAUSE)
1636                                 remote_adv |= LPA_PAUSE_CAP;
1637                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1638                                 remote_adv |= LPA_PAUSE_ASYM;
1639                 }
1640
1641                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1642                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1643                                 if (remote_adv & LPA_PAUSE_CAP)
1644                                         new_tg3_flags |=
1645                                                 (TG3_FLAG_RX_PAUSE |
1646                                                 TG3_FLAG_TX_PAUSE);
1647                                 else if (remote_adv & LPA_PAUSE_ASYM)
1648                                         new_tg3_flags |=
1649                                                 (TG3_FLAG_RX_PAUSE);
1650                         } else {
1651                                 if (remote_adv & LPA_PAUSE_CAP)
1652                                         new_tg3_flags |=
1653                                                 (TG3_FLAG_RX_PAUSE |
1654                                                 TG3_FLAG_TX_PAUSE);
1655                         }
1656                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1657                         if ((remote_adv & LPA_PAUSE_CAP) &&
1658                             (remote_adv & LPA_PAUSE_ASYM))
1659                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1660                 }
1661
1662                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1663                 tp->tg3_flags |= new_tg3_flags;
1664         } else {
1665                 new_tg3_flags = tp->tg3_flags;
1666         }
1667
1668         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1669                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1670         else
1671                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1672
1673         if (old_rx_mode != tp->rx_mode) {
1674                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1675         }
1676
1677         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1678                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1679         else
1680                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1681
1682         if (old_tx_mode != tp->tx_mode) {
1683                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1684         }
1685 }
1686
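/* Decode the PHY auxiliary status register into speed/duplex values. */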
1687 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1688 {
1689         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1690         case MII_TG3_AUX_STAT_10HALF:
1691                 *speed = SPEED_10;
1692                 *duplex = DUPLEX_HALF;
1693                 break;
1694
1695         case MII_TG3_AUX_STAT_10FULL:
1696                 *speed = SPEED_10;
1697                 *duplex = DUPLEX_FULL;
1698                 break;
1699
1700         case MII_TG3_AUX_STAT_100HALF:
1701                 *speed = SPEED_100;
1702                 *duplex = DUPLEX_HALF;
1703                 break;
1704
1705         case MII_TG3_AUX_STAT_100FULL:
1706                 *speed = SPEED_100;
1707                 *duplex = DUPLEX_FULL;
1708                 break;
1709
1710         case MII_TG3_AUX_STAT_1000HALF:
1711                 *speed = SPEED_1000;
1712                 *duplex = DUPLEX_HALF;
1713                 break;
1714
1715         case MII_TG3_AUX_STAT_1000FULL:
1716                 *speed = SPEED_1000;
1717                 *duplex = DUPLEX_FULL;
1718                 break;
1719
1720         default:
1721                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1722                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1723                                  SPEED_10;
1724                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1725                                   DUPLEX_HALF;
1726                         break;
1727                 }
1728                 *speed = SPEED_INVALID;
1729                 *duplex = DUPLEX_INVALID;
1730                 break;
1731         }
1732 }
1733
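/* Program the copper PHY advertisement registers, then either force
 * the requested speed/duplex or (re)start autonegotiation.
 */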
1734 static void tg3_phy_copper_begin(struct tg3 *tp)
1735 {
1736         u32 new_adv;
1737         int i;
1738
1739         if (tp->link_config.phy_is_low_power) {
1740                 /* Entering low power mode.  Disable gigabit and
1741                  * 100baseT advertisements.
1742                  */
1743                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1744
1745                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1746                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1747                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1748                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1749
1750                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1751         } else if (tp->link_config.speed == SPEED_INVALID) {
1752                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1753                         tp->link_config.advertising &=
1754                                 ~(ADVERTISED_1000baseT_Half |
1755                                   ADVERTISED_1000baseT_Full);
1756
1757                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1758                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1759                         new_adv |= ADVERTISE_10HALF;
1760                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1761                         new_adv |= ADVERTISE_10FULL;
1762                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1763                         new_adv |= ADVERTISE_100HALF;
1764                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1765                         new_adv |= ADVERTISE_100FULL;
1766                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1767
1768                 if (tp->link_config.advertising &
1769                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1770                         new_adv = 0;
1771                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1772                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1773                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1774                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1775                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1776                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1777                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1778                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1779                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1780                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1781                 } else {
1782                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1783                 }
1784         } else {
1785                 /* Asking for a specific link mode. */
1786                 if (tp->link_config.speed == SPEED_1000) {
1787                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1788                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1789
1790                         if (tp->link_config.duplex == DUPLEX_FULL)
1791                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1792                         else
1793                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1794                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1795                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1796                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1797                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1798                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1799                 } else {
1800                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1801
1802                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1803                         if (tp->link_config.speed == SPEED_100) {
1804                                 if (tp->link_config.duplex == DUPLEX_FULL)
1805                                         new_adv |= ADVERTISE_100FULL;
1806                                 else
1807                                         new_adv |= ADVERTISE_100HALF;
1808                         } else {
1809                                 if (tp->link_config.duplex == DUPLEX_FULL)
1810                                         new_adv |= ADVERTISE_10FULL;
1811                                 else
1812                                         new_adv |= ADVERTISE_10HALF;
1813                         }
1814                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1815                 }
1816         }
1817
1818         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1819             tp->link_config.speed != SPEED_INVALID) {
1820                 u32 bmcr, orig_bmcr;
1821
1822                 tp->link_config.active_speed = tp->link_config.speed;
1823                 tp->link_config.active_duplex = tp->link_config.duplex;
1824
1825                 bmcr = 0;
1826                 switch (tp->link_config.speed) {
1827                 default:
1828                 case SPEED_10:
1829                         break;
1830
1831                 case SPEED_100:
1832                         bmcr |= BMCR_SPEED100;
1833                         break;
1834
1835                 case SPEED_1000:
1836                         bmcr |= TG3_BMCR_SPEED1000;
1837                         break;
1838                 }
1839
1840                 if (tp->link_config.duplex == DUPLEX_FULL)
1841                         bmcr |= BMCR_FULLDPLX;
1842
1843                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1844                     (bmcr != orig_bmcr)) {
1845                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1846                         for (i = 0; i < 1500; i++) {
1847                                 u32 tmp;
1848
1849                                 udelay(10);
1850                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1851                                     tg3_readphy(tp, MII_BMSR, &tmp))
1852                                         continue;
1853                                 if (!(tmp & BMSR_LSTATUS)) {
1854                                         udelay(40);
1855                                         break;
1856                                 }
1857                         }
1858                         tg3_writephy(tp, MII_BMCR, bmcr);
1859                         udelay(40);
1860                 }
1861         } else {
1862                 tg3_writephy(tp, MII_BMCR,
1863                              BMCR_ANENABLE | BMCR_ANRESTART);
1864         }
1865 }
1866
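/* BCM5401 DSP initialization sequence, written through the DSP
 * address/data register pair.
 */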
1867 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1868 {
1869         int err;
1870
1871         /* Turn off tap power management. */
1872         /* Set Extended packet length bit */
1873         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1874
1875         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1876         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1877
1878         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1879         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1880
1881         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1882         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1883
1884         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1885         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1886
1887         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1888         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1889
1890         udelay(40);
1891
1892         return err;
1893 }
1894
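/* Return 1 if the PHY is already advertising every mode requested in
 * @mask, 0 otherwise (or if the advertisement registers cannot be read).
 */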
1895 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1896 {
1897         u32 adv_reg, all_mask = 0;
1898
1899         if (mask & ADVERTISED_10baseT_Half)
1900                 all_mask |= ADVERTISE_10HALF;
1901         if (mask & ADVERTISED_10baseT_Full)
1902                 all_mask |= ADVERTISE_10FULL;
1903         if (mask & ADVERTISED_100baseT_Half)
1904                 all_mask |= ADVERTISE_100HALF;
1905         if (mask & ADVERTISED_100baseT_Full)
1906                 all_mask |= ADVERTISE_100FULL;
1907
1908         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1909                 return 0;
1910
1911         if ((adv_reg & all_mask) != all_mask)
1912                 return 0;
1913         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1914                 u32 tg3_ctrl;
1915
1916                 all_mask = 0;
1917                 if (mask & ADVERTISED_1000baseT_Half)
1918                         all_mask |= ADVERTISE_1000HALF;
1919                 if (mask & ADVERTISED_1000baseT_Full)
1920                         all_mask |= ADVERTISE_1000FULL;
1921
1922                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1923                         return 0;
1924
1925                 if ((tg3_ctrl & all_mask) != all_mask)
1926                         return 0;
1927         }
1928         return 1;
1929 }
1930
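/* Link setup for copper PHYs: optionally reset the PHY, poll for link,
 * resolve speed/duplex and flow control, then update MAC_MODE and the
 * netdev carrier state.
 */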
1931 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1932 {
1933         int current_link_up;
1934         u32 bmsr, dummy;
1935         u16 current_speed;
1936         u8 current_duplex;
1937         int i, err;
1938
1939         tw32(MAC_EVENT, 0);
1940
1941         tw32_f(MAC_STATUS,
1942              (MAC_STATUS_SYNC_CHANGED |
1943               MAC_STATUS_CFG_CHANGED |
1944               MAC_STATUS_MI_COMPLETION |
1945               MAC_STATUS_LNKSTATE_CHANGED));
1946         udelay(40);
1947
1948         tp->mi_mode = MAC_MI_MODE_BASE;
1949         tw32_f(MAC_MI_MODE, tp->mi_mode);
1950         udelay(80);
1951
1952         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1953
1954         /* Some third-party PHYs need to be reset on link going
1955          * down.
1956          */
1957         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1958              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1959              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1960             netif_carrier_ok(tp->dev)) {
1961                 tg3_readphy(tp, MII_BMSR, &bmsr);
1962                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1963                     !(bmsr & BMSR_LSTATUS))
1964                         force_reset = 1;
1965         }
1966         if (force_reset)
1967                 tg3_phy_reset(tp);
1968
1969         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1970                 tg3_readphy(tp, MII_BMSR, &bmsr);
1971                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1972                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1973                         bmsr = 0;
1974
1975                 if (!(bmsr & BMSR_LSTATUS)) {
1976                         err = tg3_init_5401phy_dsp(tp);
1977                         if (err)
1978                                 return err;
1979
1980                         tg3_readphy(tp, MII_BMSR, &bmsr);
1981                         for (i = 0; i < 1000; i++) {
1982                                 udelay(10);
1983                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1984                                     (bmsr & BMSR_LSTATUS)) {
1985                                         udelay(40);
1986                                         break;
1987                                 }
1988                         }
1989
1990                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1991                             !(bmsr & BMSR_LSTATUS) &&
1992                             tp->link_config.active_speed == SPEED_1000) {
1993                                 err = tg3_phy_reset(tp);
1994                                 if (!err)
1995                                         err = tg3_init_5401phy_dsp(tp);
1996                                 if (err)
1997                                         return err;
1998                         }
1999                 }
2000         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2001                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2002                 /* 5701 {A0,B0} CRC bug workaround */
2003                 tg3_writephy(tp, 0x15, 0x0a75);
2004                 tg3_writephy(tp, 0x1c, 0x8c68);
2005                 tg3_writephy(tp, 0x1c, 0x8d68);
2006                 tg3_writephy(tp, 0x1c, 0x8c68);
2007         }
2008
2009         /* Clear pending interrupts... */
2010         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2011         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2012
2013         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2014                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2015         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2016                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2017
2018         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2019             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2020                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2021                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2022                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2023                 else
2024                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2025         }
2026
2027         current_link_up = 0;
2028         current_speed = SPEED_INVALID;
2029         current_duplex = DUPLEX_INVALID;
2030
2031         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2032                 u32 val;
2033
2034                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2035                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2036                 if (!(val & (1 << 10))) {
2037                         val |= (1 << 10);
2038                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2039                         goto relink;
2040                 }
2041         }
2042
2043         bmsr = 0;
2044         for (i = 0; i < 100; i++) {
2045                 tg3_readphy(tp, MII_BMSR, &bmsr);
2046                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2047                     (bmsr & BMSR_LSTATUS))
2048                         break;
2049                 udelay(40);
2050         }
2051
2052         if (bmsr & BMSR_LSTATUS) {
2053                 u32 aux_stat, bmcr;
2054
2055                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2056                 for (i = 0; i < 2000; i++) {
2057                         udelay(10);
2058                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2059                             aux_stat)
2060                                 break;
2061                 }
2062
2063                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2064                                              &current_speed,
2065                                              &current_duplex);
2066
2067                 bmcr = 0;
2068                 for (i = 0; i < 200; i++) {
2069                         tg3_readphy(tp, MII_BMCR, &bmcr);
2070                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2071                                 continue;
2072                         if (bmcr && bmcr != 0x7fff)
2073                                 break;
2074                         udelay(10);
2075                 }
2076
2077                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2078                         if (bmcr & BMCR_ANENABLE) {
2079                                 current_link_up = 1;
2080
2081                                 /* Force autoneg restart if we are exiting
2082                                  * low power mode.
2083                                  */
2084                                 if (!tg3_copper_is_advertising_all(tp,
2085                                                 tp->link_config.advertising))
2086                                         current_link_up = 0;
2087                         } else {
2088                                 current_link_up = 0;
2089                         }
2090                 } else {
2091                         if (!(bmcr & BMCR_ANENABLE) &&
2092                             tp->link_config.speed == current_speed &&
2093                             tp->link_config.duplex == current_duplex) {
2094                                 current_link_up = 1;
2095                         } else {
2096                                 current_link_up = 0;
2097                         }
2098                 }
2099
2100                 tp->link_config.active_speed = current_speed;
2101                 tp->link_config.active_duplex = current_duplex;
2102         }
2103
2104         if (current_link_up == 1 &&
2105             (tp->link_config.active_duplex == DUPLEX_FULL) &&
2106             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2107                 u32 local_adv, remote_adv;
2108
2109                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2110                         local_adv = 0;
2111                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2112
2113                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2114                         remote_adv = 0;
2115
2116                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2117
2118                 /* If we are not advertising full pause capability,
2119                  * something is wrong.  Bring the link down and reconfigure.
2120                  */
2121                 if (local_adv != ADVERTISE_PAUSE_CAP) {
2122                         current_link_up = 0;
2123                 } else {
2124                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2125                 }
2126         }
2127 relink:
2128         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2129                 u32 tmp;
2130
2131                 tg3_phy_copper_begin(tp);
2132
2133                 tg3_readphy(tp, MII_BMSR, &tmp);
2134                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2135                     (tmp & BMSR_LSTATUS))
2136                         current_link_up = 1;
2137         }
2138
2139         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2140         if (current_link_up == 1) {
2141                 if (tp->link_config.active_speed == SPEED_100 ||
2142                     tp->link_config.active_speed == SPEED_10)
2143                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2144                 else
2145                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2146         } else
2147                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2148
2149         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2150         if (tp->link_config.active_duplex == DUPLEX_HALF)
2151                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2152
2153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2154                 if (current_link_up == 1 &&
2155                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2156                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2157                 else
2158                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2159         }
2160
2161         /* ??? Without this setting Netgear GA302T PHY does not
2162          * ??? send/receive packets...
2163          */
2164         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2165             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2166                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2167                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2168                 udelay(80);
2169         }
2170
2171         tw32_f(MAC_MODE, tp->mac_mode);
2172         udelay(40);
2173
2174         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2175                 /* Polled via timer. */
2176                 tw32_f(MAC_EVENT, 0);
2177         } else {
2178                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2179         }
2180         udelay(40);
2181
2182         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2183             current_link_up == 1 &&
2184             tp->link_config.active_speed == SPEED_1000 &&
2185             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2186              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2187                 udelay(120);
2188                 tw32_f(MAC_STATUS,
2189                      (MAC_STATUS_SYNC_CHANGED |
2190                       MAC_STATUS_CFG_CHANGED));
2191                 udelay(40);
2192                 tg3_write_mem(tp,
2193                               NIC_SRAM_FIRMWARE_MBOX,
2194                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2195         }
2196
2197         if (current_link_up != netif_carrier_ok(tp->dev)) {
2198                 if (current_link_up)
2199                         netif_carrier_on(tp->dev);
2200                 else
2201                         netif_carrier_off(tp->dev);
2202                 tg3_link_report(tp);
2203         }
2204
2205         return 0;
2206 }
2207
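/* Software state for the fiber autonegotiation state machine
 * implemented by tg3_fiber_aneg_smachine() below.
 */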
2208 struct tg3_fiber_aneginfo {
2209         int state;
2210 #define ANEG_STATE_UNKNOWN              0
2211 #define ANEG_STATE_AN_ENABLE            1
2212 #define ANEG_STATE_RESTART_INIT         2
2213 #define ANEG_STATE_RESTART              3
2214 #define ANEG_STATE_DISABLE_LINK_OK      4
2215 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2216 #define ANEG_STATE_ABILITY_DETECT       6
2217 #define ANEG_STATE_ACK_DETECT_INIT      7
2218 #define ANEG_STATE_ACK_DETECT           8
2219 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2220 #define ANEG_STATE_COMPLETE_ACK         10
2221 #define ANEG_STATE_IDLE_DETECT_INIT     11
2222 #define ANEG_STATE_IDLE_DETECT          12
2223 #define ANEG_STATE_LINK_OK              13
2224 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2225 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2226
2227         u32 flags;
2228 #define MR_AN_ENABLE            0x00000001
2229 #define MR_RESTART_AN           0x00000002
2230 #define MR_AN_COMPLETE          0x00000004
2231 #define MR_PAGE_RX              0x00000008
2232 #define MR_NP_LOADED            0x00000010
2233 #define MR_TOGGLE_TX            0x00000020
2234 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2235 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2236 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2237 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2238 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2239 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2240 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2241 #define MR_TOGGLE_RX            0x00002000
2242 #define MR_NP_RX                0x00004000
2243
2244 #define MR_LINK_OK              0x80000000
2245
2246         unsigned long link_time, cur_time;
2247
2248         u32 ability_match_cfg;
2249         int ability_match_count;
2250
2251         char ability_match, idle_match, ack_match;
2252
2253         u32 txconfig, rxconfig;
2254 #define ANEG_CFG_NP             0x00000080
2255 #define ANEG_CFG_ACK            0x00000040
2256 #define ANEG_CFG_RF2            0x00000020
2257 #define ANEG_CFG_RF1            0x00000010
2258 #define ANEG_CFG_PS2            0x00000001
2259 #define ANEG_CFG_PS1            0x00008000
2260 #define ANEG_CFG_HD             0x00004000
2261 #define ANEG_CFG_FD             0x00002000
2262 #define ANEG_CFG_INVAL          0x00001f06
2263
2264 };
2265 #define ANEG_OK         0
2266 #define ANEG_DONE       1
2267 #define ANEG_TIMER_ENAB 2
2268 #define ANEG_FAILED     -1
2269
2270 #define ANEG_STATE_SETTLE_TIME  10000
2271
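/* Advance the fiber autonegotiation state machine by one step.
 * Returns ANEG_OK, ANEG_TIMER_ENAB, ANEG_DONE or ANEG_FAILED.
 */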
2272 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2273                                    struct tg3_fiber_aneginfo *ap)
2274 {
2275         unsigned long delta;
2276         u32 rx_cfg_reg;
2277         int ret;
2278
2279         if (ap->state == ANEG_STATE_UNKNOWN) {
2280                 ap->rxconfig = 0;
2281                 ap->link_time = 0;
2282                 ap->cur_time = 0;
2283                 ap->ability_match_cfg = 0;
2284                 ap->ability_match_count = 0;
2285                 ap->ability_match = 0;
2286                 ap->idle_match = 0;
2287                 ap->ack_match = 0;
2288         }
2289         ap->cur_time++;
2290
2291         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2292                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2293
2294                 if (rx_cfg_reg != ap->ability_match_cfg) {
2295                         ap->ability_match_cfg = rx_cfg_reg;
2296                         ap->ability_match = 0;
2297                         ap->ability_match_count = 0;
2298                 } else {
2299                         if (++ap->ability_match_count > 1) {
2300                                 ap->ability_match = 1;
2301                                 ap->ability_match_cfg = rx_cfg_reg;
2302                         }
2303                 }
2304                 if (rx_cfg_reg & ANEG_CFG_ACK)
2305                         ap->ack_match = 1;
2306                 else
2307                         ap->ack_match = 0;
2308
2309                 ap->idle_match = 0;
2310         } else {
2311                 ap->idle_match = 1;
2312                 ap->ability_match_cfg = 0;
2313                 ap->ability_match_count = 0;
2314                 ap->ability_match = 0;
2315                 ap->ack_match = 0;
2316
2317                 rx_cfg_reg = 0;
2318         }
2319
2320         ap->rxconfig = rx_cfg_reg;
2321         ret = ANEG_OK;
2322
2323         switch(ap->state) {
2324         case ANEG_STATE_UNKNOWN:
2325                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2326                         ap->state = ANEG_STATE_AN_ENABLE;
2327
2328                 /* fallthru */
2329         case ANEG_STATE_AN_ENABLE:
2330                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2331                 if (ap->flags & MR_AN_ENABLE) {
2332                         ap->link_time = 0;
2333                         ap->cur_time = 0;
2334                         ap->ability_match_cfg = 0;
2335                         ap->ability_match_count = 0;
2336                         ap->ability_match = 0;
2337                         ap->idle_match = 0;
2338                         ap->ack_match = 0;
2339
2340                         ap->state = ANEG_STATE_RESTART_INIT;
2341                 } else {
2342                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2343                 }
2344                 break;
2345
2346         case ANEG_STATE_RESTART_INIT:
2347                 ap->link_time = ap->cur_time;
2348                 ap->flags &= ~(MR_NP_LOADED);
2349                 ap->txconfig = 0;
2350                 tw32(MAC_TX_AUTO_NEG, 0);
2351                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2352                 tw32_f(MAC_MODE, tp->mac_mode);
2353                 udelay(40);
2354
2355                 ret = ANEG_TIMER_ENAB;
2356                 ap->state = ANEG_STATE_RESTART;
2357
2358                 /* fallthru */
2359         case ANEG_STATE_RESTART:
2360                 delta = ap->cur_time - ap->link_time;
2361                 if (delta > ANEG_STATE_SETTLE_TIME) {
2362                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2363                 } else {
2364                         ret = ANEG_TIMER_ENAB;
2365                 }
2366                 break;
2367
2368         case ANEG_STATE_DISABLE_LINK_OK:
2369                 ret = ANEG_DONE;
2370                 break;
2371
2372         case ANEG_STATE_ABILITY_DETECT_INIT:
2373                 ap->flags &= ~(MR_TOGGLE_TX);
2374                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2375                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2376                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2377                 tw32_f(MAC_MODE, tp->mac_mode);
2378                 udelay(40);
2379
2380                 ap->state = ANEG_STATE_ABILITY_DETECT;
2381                 break;
2382
2383         case ANEG_STATE_ABILITY_DETECT:
2384                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2385                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2386                 }
2387                 break;
2388
2389         case ANEG_STATE_ACK_DETECT_INIT:
2390                 ap->txconfig |= ANEG_CFG_ACK;
2391                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2392                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2393                 tw32_f(MAC_MODE, tp->mac_mode);
2394                 udelay(40);
2395
2396                 ap->state = ANEG_STATE_ACK_DETECT;
2397
2398                 /* fallthru */
2399         case ANEG_STATE_ACK_DETECT:
2400                 if (ap->ack_match != 0) {
2401                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2402                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2403                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2404                         } else {
2405                                 ap->state = ANEG_STATE_AN_ENABLE;
2406                         }
2407                 } else if (ap->ability_match != 0 &&
2408                            ap->rxconfig == 0) {
2409                         ap->state = ANEG_STATE_AN_ENABLE;
2410                 }
2411                 break;
2412
2413         case ANEG_STATE_COMPLETE_ACK_INIT:
2414                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2415                         ret = ANEG_FAILED;
2416                         break;
2417                 }
2418                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2419                                MR_LP_ADV_HALF_DUPLEX |
2420                                MR_LP_ADV_SYM_PAUSE |
2421                                MR_LP_ADV_ASYM_PAUSE |
2422                                MR_LP_ADV_REMOTE_FAULT1 |
2423                                MR_LP_ADV_REMOTE_FAULT2 |
2424                                MR_LP_ADV_NEXT_PAGE |
2425                                MR_TOGGLE_RX |
2426                                MR_NP_RX);
2427                 if (ap->rxconfig & ANEG_CFG_FD)
2428                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2429                 if (ap->rxconfig & ANEG_CFG_HD)
2430                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2431                 if (ap->rxconfig & ANEG_CFG_PS1)
2432                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2433                 if (ap->rxconfig & ANEG_CFG_PS2)
2434                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2435                 if (ap->rxconfig & ANEG_CFG_RF1)
2436                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2437                 if (ap->rxconfig & ANEG_CFG_RF2)
2438                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2439                 if (ap->rxconfig & ANEG_CFG_NP)
2440                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2441
2442                 ap->link_time = ap->cur_time;
2443
2444                 ap->flags ^= (MR_TOGGLE_TX);
2445                 if (ap->rxconfig & 0x0008)
2446                         ap->flags |= MR_TOGGLE_RX;
2447                 if (ap->rxconfig & ANEG_CFG_NP)
2448                         ap->flags |= MR_NP_RX;
2449                 ap->flags |= MR_PAGE_RX;
2450
2451                 ap->state = ANEG_STATE_COMPLETE_ACK;
2452                 ret = ANEG_TIMER_ENAB;
2453                 break;
2454
2455         case ANEG_STATE_COMPLETE_ACK:
2456                 if (ap->ability_match != 0 &&
2457                     ap->rxconfig == 0) {
2458                         ap->state = ANEG_STATE_AN_ENABLE;
2459                         break;
2460                 }
2461                 delta = ap->cur_time - ap->link_time;
2462                 if (delta > ANEG_STATE_SETTLE_TIME) {
2463                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2464                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2465                         } else {
2466                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2467                                     !(ap->flags & MR_NP_RX)) {
2468                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2469                                 } else {
2470                                         ret = ANEG_FAILED;
2471                                 }
2472                         }
2473                 }
2474                 break;
2475
2476         case ANEG_STATE_IDLE_DETECT_INIT:
2477                 ap->link_time = ap->cur_time;
2478                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2479                 tw32_f(MAC_MODE, tp->mac_mode);
2480                 udelay(40);
2481
2482                 ap->state = ANEG_STATE_IDLE_DETECT;
2483                 ret = ANEG_TIMER_ENAB;
2484                 break;
2485
2486         case ANEG_STATE_IDLE_DETECT:
2487                 if (ap->ability_match != 0 &&
2488                     ap->rxconfig == 0) {
2489                         ap->state = ANEG_STATE_AN_ENABLE;
2490                         break;
2491                 }
2492                 delta = ap->cur_time - ap->link_time;
2493                 if (delta > ANEG_STATE_SETTLE_TIME) {
2494                         /* XXX another gem from the Broadcom driver :( */
2495                         ap->state = ANEG_STATE_LINK_OK;
2496                 }
2497                 break;
2498
2499         case ANEG_STATE_LINK_OK:
2500                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2501                 ret = ANEG_DONE;
2502                 break;
2503
2504         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2505                 /* ??? unimplemented */
2506                 break;
2507
2508         case ANEG_STATE_NEXT_PAGE_WAIT:
2509                 /* ??? unimplemented */
2510                 break;
2511
2512         default:
2513                 ret = ANEG_FAILED;
2514                 break;
2515         }
2516
2517         return ret;
2518 }
2519
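/* Run the fiber autonegotiation state machine to completion, polling
 * roughly every microsecond for up to ~195 ms.  Returns 1 on success
 * and reports the negotiated MR_* flags through @flags.
 */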
2520 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2521 {
2522         int res = 0;
2523         struct tg3_fiber_aneginfo aninfo;
2524         int status = ANEG_FAILED;
2525         unsigned int tick;
2526         u32 tmp;
2527
2528         tw32_f(MAC_TX_AUTO_NEG, 0);
2529
2530         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2531         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2532         udelay(40);
2533
2534         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2535         udelay(40);
2536
2537         memset(&aninfo, 0, sizeof(aninfo));
2538         aninfo.flags |= MR_AN_ENABLE;
2539         aninfo.state = ANEG_STATE_UNKNOWN;
2540         aninfo.cur_time = 0;
2541         tick = 0;
2542         while (++tick < 195000) {
2543                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2544                 if (status == ANEG_DONE || status == ANEG_FAILED)
2545                         break;
2546
2547                 udelay(1);
2548         }
2549
2550         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2551         tw32_f(MAC_MODE, tp->mac_mode);
2552         udelay(40);
2553
2554         *flags = aninfo.flags;
2555
2556         if (status == ANEG_DONE &&
2557             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2558                              MR_LP_ADV_FULL_DUPLEX)))
2559                 res = 1;
2560
2561         return res;
2562 }
2563
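/* Hardware initialization sequence for the BCM8002 PHY. */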
2564 static void tg3_init_bcm8002(struct tg3 *tp)
2565 {
2566         u32 mac_status = tr32(MAC_STATUS);
2567         int i;
2568
2569         /* Reset on first-time init, or when we already have a link. */
2570         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2571             !(mac_status & MAC_STATUS_PCS_SYNCED))
2572                 return;
2573
2574         /* Set PLL lock range. */
2575         tg3_writephy(tp, 0x16, 0x8007);
2576
2577         /* SW reset */
2578         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2579
2580         /* Wait for reset to complete. */
2581         /* XXX schedule_timeout() ... */
2582         for (i = 0; i < 500; i++)
2583                 udelay(10);
2584
2585         /* Config mode; select PMA/Ch 1 regs. */
2586         tg3_writephy(tp, 0x10, 0x8411);
2587
2588         /* Enable auto-lock and comdet, select txclk for tx. */
2589         tg3_writephy(tp, 0x11, 0x0a10);
2590
2591         tg3_writephy(tp, 0x18, 0x00a0);
2592         tg3_writephy(tp, 0x16, 0x41ff);
2593
2594         /* Assert and deassert POR. */
2595         tg3_writephy(tp, 0x13, 0x0400);
2596         udelay(40);
2597         tg3_writephy(tp, 0x13, 0x0000);
2598
2599         tg3_writephy(tp, 0x11, 0x0a50);
2600         udelay(40);
2601         tg3_writephy(tp, 0x11, 0x0a10);
2602
2603         /* Wait for signal to stabilize */
2604         /* XXX schedule_timeout() ... */
2605         for (i = 0; i < 15000; i++)
2606                 udelay(10);
2607
2608         /* Deselect the channel register so we can read the PHYID
2609          * later.
2610          */
2611         tg3_writephy(tp, 0x10, 0x8011);
2612 }
2613
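/* Fiber link setup driven by the hardware SG_DIG autonegotiation block,
 * including the MAC_SERDES_CFG workaround and parallel-detection
 * fallback.  Returns 1 when the link is up.
 */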
2614 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2615 {
2616         u32 sg_dig_ctrl, sg_dig_status;
2617         u32 serdes_cfg, expected_sg_dig_ctrl;
2618         int workaround, port_a;
2619         int current_link_up;
2620
2621         serdes_cfg = 0;
2622         expected_sg_dig_ctrl = 0;
2623         workaround = 0;
2624         port_a = 1;
2625         current_link_up = 0;
2626
2627         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2628             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2629                 workaround = 1;
2630                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2631                         port_a = 0;
2632
2633                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2634                 /* preserve bits 20-23 for voltage regulator */
2635                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2636         }
2637
2638         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2639
2640         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2641                 if (sg_dig_ctrl & (1 << 31)) {
2642                         if (workaround) {
2643                                 u32 val = serdes_cfg;
2644
2645                                 if (port_a)
2646                                         val |= 0xc010000;
2647                                 else
2648                                         val |= 0x4010000;
2649                                 tw32_f(MAC_SERDES_CFG, val);
2650                         }
2651                         tw32_f(SG_DIG_CTRL, 0x01388400);
2652                 }
2653                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2654                         tg3_setup_flow_control(tp, 0, 0);
2655                         current_link_up = 1;
2656                 }
2657                 goto out;
2658         }
2659
2660         /* Want auto-negotiation.  */
2661         expected_sg_dig_ctrl = 0x81388400;
2662
2663         /* Pause capability */
2664         expected_sg_dig_ctrl |= (1 << 11);
2665
2666                 /* Asymmetric pause */
2667         expected_sg_dig_ctrl |= (1 << 12);
2668
2669         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2670                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2671                     tp->serdes_counter &&
2672                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2673                                     MAC_STATUS_RCVD_CFG)) ==
2674                      MAC_STATUS_PCS_SYNCED)) {
2675                         tp->serdes_counter--;
2676                         current_link_up = 1;
2677                         goto out;
2678                 }
2679 restart_autoneg:
2680                 if (workaround)
2681                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2682                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2683                 udelay(5);
2684                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2685
2686                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2687                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2688         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2689                                  MAC_STATUS_SIGNAL_DET)) {
2690                 sg_dig_status = tr32(SG_DIG_STATUS);
2691                 mac_status = tr32(MAC_STATUS);
2692
2693                 if ((sg_dig_status & (1 << 1)) &&
2694                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2695                         u32 local_adv, remote_adv;
2696
2697                         local_adv = ADVERTISE_PAUSE_CAP;
2698                         remote_adv = 0;
2699                         if (sg_dig_status & (1 << 19))
2700                                 remote_adv |= LPA_PAUSE_CAP;
2701                         if (sg_dig_status & (1 << 20))
2702                                 remote_adv |= LPA_PAUSE_ASYM;
2703
2704                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2705                         current_link_up = 1;
2706                         tp->serdes_counter = 0;
2707                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2708                 } else if (!(sg_dig_status & (1 << 1))) {
2709                         if (tp->serdes_counter)
2710                                 tp->serdes_counter--;
2711                         else {
2712                                 if (workaround) {
2713                                         u32 val = serdes_cfg;
2714
2715                                         if (port_a)
2716                                                 val |= 0xc010000;
2717                                         else
2718                                                 val |= 0x4010000;
2719
2720                                         tw32_f(MAC_SERDES_CFG, val);
2721                                 }
2722
2723                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2724                                 udelay(40);
2725
2726                                 /* Link parallel detection - link is up
2727                                  * only if we have PCS_SYNC and are not
2728                                  * receiving config code words. */
2729                                 mac_status = tr32(MAC_STATUS);
2730                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2731                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2732                                         tg3_setup_flow_control(tp, 0, 0);
2733                                         current_link_up = 1;
2734                                         tp->tg3_flags2 |=
2735                                                 TG3_FLG2_PARALLEL_DETECT;
2736                                         tp->serdes_counter =
2737                                                 SERDES_PARALLEL_DET_TIMEOUT;
2738                                 } else
2739                                         goto restart_autoneg;
2740                         }
2741                 }
2742         } else {
2743                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2744                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2745         }
2746
2747 out:
2748         return current_link_up;
2749 }
2750
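/* Software (non-hardware-assisted) fiber link setup.  With PCS sync
 * present, either run fiber_autoneg() and derive flow control from the
 * link partner's pause bits, or force a 1000FD link by briefly sending
 * config codes.
 */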
2751 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2752 {
2753         int current_link_up = 0;
2754
2755         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2756                 goto out;
2757
2758         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2759                 u32 flags;
2760                 int i;
2761
2762                 if (fiber_autoneg(tp, &flags)) {
2763                         u32 local_adv, remote_adv;
2764
2765                         local_adv = ADVERTISE_PAUSE_CAP;
2766                         remote_adv = 0;
2767                         if (flags & MR_LP_ADV_SYM_PAUSE)
2768                                 remote_adv |= LPA_PAUSE_CAP;
2769                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2770                                 remote_adv |= LPA_PAUSE_ASYM;
2771
2772                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2773
2774                         current_link_up = 1;
2775                 }
2776                 for (i = 0; i < 30; i++) {
2777                         udelay(20);
2778                         tw32_f(MAC_STATUS,
2779                                (MAC_STATUS_SYNC_CHANGED |
2780                                 MAC_STATUS_CFG_CHANGED));
2781                         udelay(40);
2782                         if ((tr32(MAC_STATUS) &
2783                              (MAC_STATUS_SYNC_CHANGED |
2784                               MAC_STATUS_CFG_CHANGED)) == 0)
2785                                 break;
2786                 }
2787
2788                 mac_status = tr32(MAC_STATUS);
2789                 if (current_link_up == 0 &&
2790                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2791                     !(mac_status & MAC_STATUS_RCVD_CFG))
2792                         current_link_up = 1;
2793         } else {
2794                 /* Forcing 1000FD link up. */
2795                 current_link_up = 1;
2796
2797                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2798                 udelay(40);
2799
2800                 tw32_f(MAC_MODE, tp->mac_mode);
2801                 udelay(40);
2802         }
2803
2804 out:
2805         return current_link_up;
2806 }
2807
2808 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2809 {
2810         u32 orig_pause_cfg;
2811         u16 orig_active_speed;
2812         u8 orig_active_duplex;
2813         u32 mac_status;
2814         int current_link_up;
2815         int i;
2816
2817         orig_pause_cfg =
2818                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2819                                   TG3_FLAG_TX_PAUSE));
2820         orig_active_speed = tp->link_config.active_speed;
2821         orig_active_duplex = tp->link_config.active_duplex;
2822
2823         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2824             netif_carrier_ok(tp->dev) &&
2825             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2826                 mac_status = tr32(MAC_STATUS);
2827                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2828                                MAC_STATUS_SIGNAL_DET |
2829                                MAC_STATUS_CFG_CHANGED |
2830                                MAC_STATUS_RCVD_CFG);
2831                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2832                                    MAC_STATUS_SIGNAL_DET)) {
2833                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2834                                             MAC_STATUS_CFG_CHANGED));
2835                         return 0;
2836                 }
2837         }
2838
2839         tw32_f(MAC_TX_AUTO_NEG, 0);
2840
2841         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2842         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2843         tw32_f(MAC_MODE, tp->mac_mode);
2844         udelay(40);
2845
2846         if (tp->phy_id == PHY_ID_BCM8002)
2847                 tg3_init_bcm8002(tp);
2848
2849         /* Enable link change event even when serdes polling.  */
2850         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2851         udelay(40);
2852
2853         current_link_up = 0;
2854         mac_status = tr32(MAC_STATUS);
2855
2856         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2857                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2858         else
2859                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2860
2861         tp->hw_status->status =
2862                 (SD_STATUS_UPDATED |
2863                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2864
2865         for (i = 0; i < 100; i++) {
2866                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2867                                     MAC_STATUS_CFG_CHANGED));
2868                 udelay(5);
2869                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2870                                          MAC_STATUS_CFG_CHANGED |
2871                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2872                         break;
2873         }
2874
2875         mac_status = tr32(MAC_STATUS);
2876         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2877                 current_link_up = 0;
2878                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2879                     tp->serdes_counter == 0) {
2880                         tw32_f(MAC_MODE, (tp->mac_mode |
2881                                           MAC_MODE_SEND_CONFIGS));
2882                         udelay(1);
2883                         tw32_f(MAC_MODE, tp->mac_mode);
2884                 }
2885         }
2886
2887         if (current_link_up == 1) {
2888                 tp->link_config.active_speed = SPEED_1000;
2889                 tp->link_config.active_duplex = DUPLEX_FULL;
2890                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2891                                     LED_CTRL_LNKLED_OVERRIDE |
2892                                     LED_CTRL_1000MBPS_ON));
2893         } else {
2894                 tp->link_config.active_speed = SPEED_INVALID;
2895                 tp->link_config.active_duplex = DUPLEX_INVALID;
2896                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2897                                     LED_CTRL_LNKLED_OVERRIDE |
2898                                     LED_CTRL_TRAFFIC_OVERRIDE));
2899         }
2900
2901         if (current_link_up != netif_carrier_ok(tp->dev)) {
2902                 if (current_link_up)
2903                         netif_carrier_on(tp->dev);
2904                 else
2905                         netif_carrier_off(tp->dev);
2906                 tg3_link_report(tp);
2907         } else {
2908                 u32 now_pause_cfg =
2909                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2910                                          TG3_FLAG_TX_PAUSE);
2911                 if (orig_pause_cfg != now_pause_cfg ||
2912                     orig_active_speed != tp->link_config.active_speed ||
2913                     orig_active_duplex != tp->link_config.active_duplex)
2914                         tg3_link_report(tp);
2915         }
2916
2917         return 0;
2918 }
2919
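/* Link setup for SERDES devices that expose an MII register interface
 * (TG3_FLG2_MII_SERDES): negotiation is driven through the 1000BASE-X
 * advertisement bits in the MII registers rather than the SG_DIG block.
 */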
2920 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2921 {
2922         int current_link_up, err = 0;
2923         u32 bmsr, bmcr;
2924         u16 current_speed;
2925         u8 current_duplex;
2926
2927         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2928         tw32_f(MAC_MODE, tp->mac_mode);
2929         udelay(40);
2930
2931         tw32(MAC_EVENT, 0);
2932
2933         tw32_f(MAC_STATUS,
2934              (MAC_STATUS_SYNC_CHANGED |
2935               MAC_STATUS_CFG_CHANGED |
2936               MAC_STATUS_MI_COMPLETION |
2937               MAC_STATUS_LNKSTATE_CHANGED));
2938         udelay(40);
2939
2940         if (force_reset)
2941                 tg3_phy_reset(tp);
2942
2943         current_link_up = 0;
2944         current_speed = SPEED_INVALID;
2945         current_duplex = DUPLEX_INVALID;
2946
2947         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2948         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2949         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2950                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2951                         bmsr |= BMSR_LSTATUS;
2952                 else
2953                         bmsr &= ~BMSR_LSTATUS;
2954         }
2955
2956         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2957
2958         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2959             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2960                 /* do nothing, just check for link up at the end */
2961         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2962                 u32 adv, new_adv;
2963
2964                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2965                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2966                                   ADVERTISE_1000XPAUSE |
2967                                   ADVERTISE_1000XPSE_ASYM |
2968                                   ADVERTISE_SLCT);
2969
2970                 /* Always advertise symmetric PAUSE just like copper */
2971                 new_adv |= ADVERTISE_1000XPAUSE;
2972
2973                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2974                         new_adv |= ADVERTISE_1000XHALF;
2975                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2976                         new_adv |= ADVERTISE_1000XFULL;
2977
2978                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2979                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2980                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2981                         tg3_writephy(tp, MII_BMCR, bmcr);
2982
2983                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2984                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2985                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2986
2987                         return err;
2988                 }
2989         } else {
2990                 u32 new_bmcr;
2991
2992                 bmcr &= ~BMCR_SPEED1000;
2993                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2994
2995                 if (tp->link_config.duplex == DUPLEX_FULL)
2996                         new_bmcr |= BMCR_FULLDPLX;
2997
2998                 if (new_bmcr != bmcr) {
2999                         /* BMCR_SPEED1000 is a reserved bit that needs
3000                          * to be set on write.
3001                          */
3002                         new_bmcr |= BMCR_SPEED1000;
3003
3004                         /* Force a linkdown */
3005                         if (netif_carrier_ok(tp->dev)) {
3006                                 u32 adv;
3007
3008                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3009                                 adv &= ~(ADVERTISE_1000XFULL |
3010                                          ADVERTISE_1000XHALF |
3011                                          ADVERTISE_SLCT);
3012                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3013                                 tg3_writephy(tp, MII_BMCR, bmcr |
3014                                                            BMCR_ANRESTART |
3015                                                            BMCR_ANENABLE);
3016                                 udelay(10);
3017                                 netif_carrier_off(tp->dev);
3018                         }
3019                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3020                         bmcr = new_bmcr;
3021                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3022                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3023                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3024                             ASIC_REV_5714) {
3025                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3026                                         bmsr |= BMSR_LSTATUS;
3027                                 else
3028                                         bmsr &= ~BMSR_LSTATUS;
3029                         }
3030                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3031                 }
3032         }
3033
3034         if (bmsr & BMSR_LSTATUS) {
3035                 current_speed = SPEED_1000;
3036                 current_link_up = 1;
3037                 if (bmcr & BMCR_FULLDPLX)
3038                         current_duplex = DUPLEX_FULL;
3039                 else
3040                         current_duplex = DUPLEX_HALF;
3041
3042                 if (bmcr & BMCR_ANENABLE) {
3043                         u32 local_adv, remote_adv, common;
3044
3045                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3046                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3047                         common = local_adv & remote_adv;
3048                         if (common & (ADVERTISE_1000XHALF |
3049                                       ADVERTISE_1000XFULL)) {
3050                                 if (common & ADVERTISE_1000XFULL)
3051                                         current_duplex = DUPLEX_FULL;
3052                                 else
3053                                         current_duplex = DUPLEX_HALF;
3054
3055                                 tg3_setup_flow_control(tp, local_adv,
3056                                                        remote_adv);
3057                         }
3058                         else
3059                                 current_link_up = 0;
3060                 }
3061         }
3062
3063         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3064         if (tp->link_config.active_duplex == DUPLEX_HALF)
3065                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3066
3067         tw32_f(MAC_MODE, tp->mac_mode);
3068         udelay(40);
3069
3070         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3071
3072         tp->link_config.active_speed = current_speed;
3073         tp->link_config.active_duplex = current_duplex;
3074
3075         if (current_link_up != netif_carrier_ok(tp->dev)) {
3076                 if (current_link_up)
3077                         netif_carrier_on(tp->dev);
3078                 else {
3079                         netif_carrier_off(tp->dev);
3080                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3081                 }
3082                 tg3_link_report(tp);
3083         }
3084         return err;
3085 }
3086
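/* Periodic fallback for MII-SERDES parts: once the autoneg timeout has
 * expired with no link, force the link up via parallel detection when
 * signal detect is present and no config code words are being received,
 * and re-enable autoneg as soon as config code words reappear.
 */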
3087 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3088 {
3089         if (tp->serdes_counter) {
3090                 /* Give autoneg time to complete. */
3091                 tp->serdes_counter--;
3092                 return;
3093         }
3094         if (!netif_carrier_ok(tp->dev) &&
3095             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3096                 u32 bmcr;
3097
3098                 tg3_readphy(tp, MII_BMCR, &bmcr);
3099                 if (bmcr & BMCR_ANENABLE) {
3100                         u32 phy1, phy2;
3101
3102                         /* Select shadow register 0x1f */
3103                         tg3_writephy(tp, 0x1c, 0x7c00);
3104                         tg3_readphy(tp, 0x1c, &phy1);
3105
3106                         /* Select expansion interrupt status register */
3107                         tg3_writephy(tp, 0x17, 0x0f01);
3108                         tg3_readphy(tp, 0x15, &phy2);
3109                         tg3_readphy(tp, 0x15, &phy2);
3110
3111                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3112                                 /* We have signal detect and are not
3113                                  * receiving config code words, so the link
3114                                  * is up by parallel detection.
3115                                  */
3116
3117                                 bmcr &= ~BMCR_ANENABLE;
3118                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3119                                 tg3_writephy(tp, MII_BMCR, bmcr);
3120                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3121                         }
3122                 }
3123         }
3124         else if (netif_carrier_ok(tp->dev) &&
3125                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3126                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3127                 u32 phy2;
3128
3129                 /* Select expansion interrupt status register */
3130                 tg3_writephy(tp, 0x17, 0x0f01);
3131                 tg3_readphy(tp, 0x15, &phy2);
3132                 if (phy2 & 0x20) {
3133                         u32 bmcr;
3134
3135                         /* Config code words received, turn on autoneg. */
3136                         tg3_readphy(tp, MII_BMCR, &bmcr);
3137                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3138
3139                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3140
3141                 }
3142         }
3143 }
3144
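/* Top-level link setup: dispatch to the fiber, MII-SERDES or copper
 * routine, then adjust the MAC inter-packet gap / slot time for
 * half-duplex gigabit, and update statistics coalescing and the PCIe
 * power-management threshold based on the resulting carrier state.
 */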
3145 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3146 {
3147         int err;
3148
3149         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3150                 err = tg3_setup_fiber_phy(tp, force_reset);
3151         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3152                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3153         } else {
3154                 err = tg3_setup_copper_phy(tp, force_reset);
3155         }
3156
3157         if (tp->link_config.active_speed == SPEED_1000 &&
3158             tp->link_config.active_duplex == DUPLEX_HALF)
3159                 tw32(MAC_TX_LENGTHS,
3160                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3161                       (6 << TX_LENGTHS_IPG_SHIFT) |
3162                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3163         else
3164                 tw32(MAC_TX_LENGTHS,
3165                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3166                       (6 << TX_LENGTHS_IPG_SHIFT) |
3167                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3168
3169         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3170                 if (netif_carrier_ok(tp->dev)) {
3171                         tw32(HOSTCC_STAT_COAL_TICKS,
3172                              tp->coal.stats_block_coalesce_usecs);
3173                 } else {
3174                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3175                 }
3176         }
3177
3178         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3179                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3180                 if (!netif_carrier_ok(tp->dev))
3181                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3182                               tp->pwrmgmt_thresh;
3183                 else
3184                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3185                 tw32(PCIE_PWR_MGMT_THRESH, val);
3186         }
3187
3188         return err;
3189 }
3190
3191 /* This is called whenever we suspect that the system chipset is re-
3192  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3193  * is bogus tx completions. We try to recover by setting the
3194  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3195  * in the workqueue.
3196  */
3197 static void tg3_tx_recover(struct tg3 *tp)
3198 {
3199         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3200                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3201
3202         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3203                "mapped I/O cycles to the network device, attempting to "
3204                "recover. Please report the problem to the driver maintainer "
3205                "and include system chipset information.\n", tp->dev->name);
3206
3207         spin_lock(&tp->lock);
3208         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3209         spin_unlock(&tp->lock);
3210 }
3211
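/* Number of free TX descriptors.  The smp_mb() here pairs with the one
 * in tg3_tx(): it ensures a fresh tp->tx_cons is read when re-checking
 * ring space after stopping the queue.
 */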
3212 static inline u32 tg3_tx_avail(struct tg3 *tp)
3213 {
3214         smp_mb();
3215         return (tp->tx_pending -
3216                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3217 }
3218
3219 /* Tigon3 never reports partial packet sends.  So we do not
3220  * need special logic to handle SKBs that have not had all
3221  * of their frags sent yet, like SunGEM does.
3222  */
3223 static void tg3_tx(struct tg3 *tp)
3224 {
3225         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3226         u32 sw_idx = tp->tx_cons;
3227
3228         while (sw_idx != hw_idx) {
3229                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3230                 struct sk_buff *skb = ri->skb;
3231                 int i, tx_bug = 0;
3232
3233                 if (unlikely(skb == NULL)) {
3234                         tg3_tx_recover(tp);
3235                         return;
3236                 }
3237
3238                 pci_unmap_single(tp->pdev,
3239                                  pci_unmap_addr(ri, mapping),
3240                                  skb_headlen(skb),
3241                                  PCI_DMA_TODEVICE);
3242
3243                 ri->skb = NULL;
3244
3245                 sw_idx = NEXT_TX(sw_idx);
3246
3247                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3248                         ri = &tp->tx_buffers[sw_idx];
3249                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3250                                 tx_bug = 1;
3251
3252                         pci_unmap_page(tp->pdev,
3253                                        pci_unmap_addr(ri, mapping),
3254                                        skb_shinfo(skb)->frags[i].size,
3255                                        PCI_DMA_TODEVICE);
3256
3257                         sw_idx = NEXT_TX(sw_idx);
3258                 }
3259
3260                 dev_kfree_skb(skb);
3261
3262                 if (unlikely(tx_bug)) {
3263                         tg3_tx_recover(tp);
3264                         return;
3265                 }
3266         }
3267
3268         tp->tx_cons = sw_idx;
3269
3270         /* Need to make the tx_cons update visible to tg3_start_xmit()
3271          * before checking for netif_queue_stopped().  Without the
3272          * memory barrier, there is a small possibility that tg3_start_xmit()
3273          * will miss it and cause the queue to be stopped forever.
3274          */
3275         smp_mb();
3276
3277         if (unlikely(netif_queue_stopped(tp->dev) &&
3278                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3279                 netif_tx_lock(tp->dev);
3280                 if (netif_queue_stopped(tp->dev) &&
3281                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3282                         netif_wake_queue(tp->dev);
3283                 netif_tx_unlock(tp->dev);
3284         }
3285 }
3286
3287 /* Returns size of skb allocated or < 0 on error.
3288  *
3289  * We only need to fill in the address because the other members
3290  * of the RX descriptor are invariant, see tg3_init_rings.
3291  *
3292  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3293  * posting buffers we only dirty the first cache line of the RX
3294  * descriptor (containing the address).  Whereas for the RX status
3295  * buffers the cpu only reads the last cacheline of the RX descriptor
3296  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3297  */
3298 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3299                             int src_idx, u32 dest_idx_unmasked)
3300 {
3301         struct tg3_rx_buffer_desc *desc;
3302         struct ring_info *map, *src_map;
3303         struct sk_buff *skb;
3304         dma_addr_t mapping;
3305         int skb_size, dest_idx;
3306
3307         src_map = NULL;
3308         switch (opaque_key) {
3309         case RXD_OPAQUE_RING_STD:
3310                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3311                 desc = &tp->rx_std[dest_idx];
3312                 map = &tp->rx_std_buffers[dest_idx];
3313                 if (src_idx >= 0)
3314                         src_map = &tp->rx_std_buffers[src_idx];
3315                 skb_size = tp->rx_pkt_buf_sz;
3316                 break;
3317
3318         case RXD_OPAQUE_RING_JUMBO:
3319                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3320                 desc = &tp->rx_jumbo[dest_idx];
3321                 map = &tp->rx_jumbo_buffers[dest_idx];
3322                 if (src_idx >= 0)
3323                         src_map = &tp->rx_jumbo_buffers[src_idx];
3324                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3325                 break;
3326
3327         default:
3328                 return -EINVAL;
3329         }
3330
3331         /* Do not overwrite any of the map or rp information
3332          * until we are sure we can commit to a new buffer.
3333          *
3334          * Callers depend upon this behavior and assume that
3335          * we leave everything unchanged if we fail.
3336          */
3337         skb = netdev_alloc_skb(tp->dev, skb_size);
3338         if (skb == NULL)
3339                 return -ENOMEM;
3340
3341         skb_reserve(skb, tp->rx_offset);
3342
3343         mapping = pci_map_single(tp->pdev, skb->data,
3344                                  skb_size - tp->rx_offset,
3345                                  PCI_DMA_FROMDEVICE);
3346
3347         map->skb = skb;
3348         pci_unmap_addr_set(map, mapping, mapping);
3349
3350         if (src_map != NULL)
3351                 src_map->skb = NULL;
3352
3353         desc->addr_hi = ((u64)mapping >> 32);
3354         desc->addr_lo = ((u64)mapping & 0xffffffff);
3355
3356         return skb_size;
3357 }
3358
3359 /* We only need to copy over the address because the other
3360  * members of the RX descriptor are invariant.  See notes above
3361  * tg3_alloc_rx_skb for full details.
3362  */
3363 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3364                            int src_idx, u32 dest_idx_unmasked)
3365 {
3366         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3367         struct ring_info *src_map, *dest_map;
3368         int dest_idx;
3369
3370         switch (opaque_key) {
3371         case RXD_OPAQUE_RING_STD:
3372                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3373                 dest_desc = &tp->rx_std[dest_idx];
3374                 dest_map = &tp->rx_std_buffers[dest_idx];
3375                 src_desc = &tp->rx_std[src_idx];
3376                 src_map = &tp->rx_std_buffers[src_idx];
3377                 break;
3378
3379         case RXD_OPAQUE_RING_JUMBO:
3380                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3381                 dest_desc = &tp->rx_jumbo[dest_idx];
3382                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3383                 src_desc = &tp->rx_jumbo[src_idx];
3384                 src_map = &tp->rx_jumbo_buffers[src_idx];
3385                 break;
3386
3387         default:
3388                 return;
3389         }
3390
3391         dest_map->skb = src_map->skb;
3392         pci_unmap_addr_set(dest_map, mapping,
3393                            pci_unmap_addr(src_map, mapping));
3394         dest_desc->addr_hi = src_desc->addr_hi;
3395         dest_desc->addr_lo = src_desc->addr_lo;
3396
3397         src_map->skb = NULL;
3398 }
3399
3400 #if TG3_VLAN_TAG_USED
3401 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3402 {
3403         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3404 }
3405 #endif
3406
3407 /* The RX ring scheme is composed of multiple rings which post fresh
3408  * buffers to the chip, and one special ring the chip uses to report
3409  * status back to the host.
3410  *
3411  * The special ring reports the status of received packets to the
3412  * host.  The chip does not write into the original descriptor the
3413  * RX buffer was obtained from.  The chip simply takes the original
3414  * descriptor as provided by the host, updates the status and length
3415  * field, then writes this into the next status ring entry.
3416  *
3417  * Each ring the host uses to post buffers to the chip is described
3418  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3419  * it is first placed into the on-chip ram.  When the packet's length
3420  * is known, it walks down the TG3_BDINFO entries to select the ring.
3421  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3422  * which is within the range of the new packet's length is chosen.
3423  *
3424  * The "separate ring for rx status" scheme may sound queer, but it makes
3425  * sense from a cache coherency perspective.  If only the host writes
3426  * to the buffer post rings, and only the chip writes to the rx status
3427  * rings, then cache lines never move beyond shared-modified state.
3428  * If both the host and chip were to write into the same ring, cache line
3429  * eviction could occur since both entities want it in an exclusive state.
3430  */
3431 static int tg3_rx(struct tg3 *tp, int budget)
3432 {
3433         u32 work_mask, rx_std_posted = 0;
3434         u32 sw_idx = tp->rx_rcb_ptr;
3435         u16 hw_idx;
3436         int received;
3437
3438         hw_idx = tp->hw_status->idx[0].rx_producer;
3439         /*
3440          * We need to order the read of hw_idx and the read of
3441          * the opaque cookie.
3442          */
3443         rmb();
3444         work_mask = 0;
3445         received = 0;
3446         while (sw_idx != hw_idx && budget > 0) {
3447                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3448                 unsigned int len;
3449                 struct sk_buff *skb;
3450                 dma_addr_t dma_addr;
3451                 u32 opaque_key, desc_idx, *post_ptr;
3452
3453                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3454                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3455                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3456                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3457                                                   mapping);
3458                         skb = tp->rx_std_buffers[desc_idx].skb;
3459                         post_ptr = &tp->rx_std_ptr;
3460                         rx_std_posted++;
3461                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3462                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3463                                                   mapping);
3464                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3465                         post_ptr = &tp->rx_jumbo_ptr;
3466                 }
3467                 else {
3468                         goto next_pkt_nopost;
3469                 }
3470
3471                 work_mask |= opaque_key;
3472
3473                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3474                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3475                 drop_it:
3476                         tg3_recycle_rx(tp, opaque_key,
3477                                        desc_idx, *post_ptr);
3478                 drop_it_no_recycle:
3479                         /* Other statistics are kept track of by the card. */
3480                         tp->net_stats.rx_dropped++;
3481                         goto next_pkt;
3482                 }
3483
3484                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3485
3486                 if (len > RX_COPY_THRESHOLD
3487                         && tp->rx_offset == 2
3488                         /* rx_offset != 2 iff this is a 5701 card running
3489                          * in PCI-X mode [see tg3_get_invariants()] */
3490                 ) {
3491                         int skb_size;
3492
3493                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3494                                                     desc_idx, *post_ptr);
3495                         if (skb_size < 0)
3496                                 goto drop_it;
3497
3498                         pci_unmap_single(tp->pdev, dma_addr,
3499                                          skb_size - tp->rx_offset,
3500                                          PCI_DMA_FROMDEVICE);
3501
3502                         skb_put(skb, len);
3503                 } else {
3504                         struct sk_buff *copy_skb;
3505
3506                         tg3_recycle_rx(tp, opaque_key,
3507                                        desc_idx, *post_ptr);
3508
3509                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3510                         if (copy_skb == NULL)
3511                                 goto drop_it_no_recycle;
3512
3513                         skb_reserve(copy_skb, 2);
3514                         skb_put(copy_skb, len);
3515                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3516                         skb_copy_from_linear_data(skb, copy_skb->data, len);
3517                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3518
3519                         /* We'll reuse the original ring buffer. */
3520                         skb = copy_skb;
3521                 }
3522
3523                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3524                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3525                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3526                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3527                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3528                 else
3529                         skb->ip_summed = CHECKSUM_NONE;
3530
3531                 skb->protocol = eth_type_trans(skb, tp->dev);
3532 #if TG3_VLAN_TAG_USED
3533                 if (tp->vlgrp != NULL &&
3534                     desc->type_flags & RXD_FLAG_VLAN) {
3535                         tg3_vlan_rx(tp, skb,
3536                                     desc->err_vlan & RXD_VLAN_MASK);
3537                 } else
3538 #endif
3539                         netif_receive_skb(skb);
3540
3541                 tp->dev->last_rx = jiffies;
3542                 received++;
3543                 budget--;
3544
3545 next_pkt:
3546                 (*post_ptr)++;
3547
3548                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3549                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3550
3551                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3552                                      TG3_64BIT_REG_LOW, idx);
3553                         work_mask &= ~RXD_OPAQUE_RING_STD;
3554                         rx_std_posted = 0;
3555                 }
3556 next_pkt_nopost:
3557                 sw_idx++;
3558                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3559
3560                 /* Refresh hw_idx to see if there is new work */
3561                 if (sw_idx == hw_idx) {
3562                         hw_idx = tp->hw_status->idx[0].rx_producer;
3563                         rmb();
3564                 }
3565         }
3566
3567         /* ACK the status ring. */
3568         tp->rx_rcb_ptr = sw_idx;
3569         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3570
3571         /* Refill RX ring(s). */
3572         if (work_mask & RXD_OPAQUE_RING_STD) {
3573                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3574                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3575                              sw_idx);
3576         }
3577         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3578                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3579                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3580                              sw_idx);
3581         }
3582         mmiowb();
3583
3584         return received;
3585 }
3586
3587 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3588 {
3589         struct tg3_hw_status *sblk = tp->hw_status;
3590
3591         /* handle link change and other phy events */
3592         if (!(tp->tg3_flags &
3593               (TG3_FLAG_USE_LINKCHG_REG |
3594                TG3_FLAG_POLL_SERDES))) {
3595                 if (sblk->status & SD_STATUS_LINK_CHG) {
3596                         sblk->status = SD_STATUS_UPDATED |
3597                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3598                         spin_lock(&tp->lock);
3599                         tg3_setup_phy(tp, 0);
3600                         spin_unlock(&tp->lock);
3601                 }
3602         }
3603
3604         /* run TX completion thread */
3605         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3606                 tg3_tx(tp);
3607                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3608                         return work_done;
3609         }
3610
3611         /* run RX thread, within the bounds set by NAPI.
3612          * All RX "locking" is done by ensuring outside
3613          * code synchronizes with tg3->napi.poll()
3614          */
3615         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3616                 work_done += tg3_rx(tp, budget - work_done);
3617
3618         return work_done;
3619 }
3620
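/* NAPI poll callback: loop handling link events, TX completions and RX
 * packets until either the budget is exhausted or the status block
 * indicates no more work, then re-enable interrupts via
 * tg3_restart_ints().
 */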
3621 static int tg3_poll(struct napi_struct *napi, int budget)
3622 {
3623         struct tg3 *tp = container_of(napi, struct tg3, napi);
3624         int work_done = 0;
3625         struct tg3_hw_status *sblk = tp->hw_status;
3626
3627         while (1) {
3628                 work_done = tg3_poll_work(tp, work_done, budget);
3629
3630                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3631                         goto tx_recovery;
3632
3633                 if (unlikely(work_done >= budget))
3634                         break;
3635
3636                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3637                         /* tp->last_tag is used in tg3_restart_ints() below
3638                          * to tell the hw how much work has been processed,
3639                          * so we must read it before checking for more work.
3640                          */
3641                         tp->last_tag = sblk->status_tag;
3642                         rmb();
3643                 } else
3644                         sblk->status &= ~SD_STATUS_UPDATED;
3645
3646                 if (likely(!tg3_has_work(tp))) {
3647                         netif_rx_complete(tp->dev, napi);
3648                         tg3_restart_ints(tp);
3649                         break;
3650                 }
3651         }
3652
3653         return work_done;
3654
3655 tx_recovery:
3656         /* work_done is guaranteed to be less than budget. */
3657         netif_rx_complete(tp->dev, napi);
3658         schedule_work(&tp->reset_task);
3659         return work_done;
3660 }
3661
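/* Mark the device as quiescing so the interrupt handlers stop
 * scheduling NAPI, then wait for any handler already running on
 * another CPU to finish.
 */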
3662 static void tg3_irq_quiesce(struct tg3 *tp)
3663 {
3664         BUG_ON(tp->irq_sync);
3665
3666         tp->irq_sync = 1;
3667         smp_mb();
3668
3669         synchronize_irq(tp->pdev->irq);
3670 }
3671
3672 static inline int tg3_irq_sync(struct tg3 *tp)
3673 {
3674         return tp->irq_sync;
3675 }
3676
3677 /* Fully shut down all tg3 driver activity elsewhere in the system.
3678  * If irq_sync is non-zero, we also synchronize with the IRQ
3679  * handler.  This is normally only necessary when shutting down
3680  * the device.
3681  */
3682 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3683 {
3684         spin_lock_bh(&tp->lock);
3685         if (irq_sync)
3686                 tg3_irq_quiesce(tp);
3687 }
3688
3689 static inline void tg3_full_unlock(struct tg3 *tp)
3690 {
3691         spin_unlock_bh(&tp->lock);
3692 }
3693
3694 /* One-shot MSI handler - the chip automatically disables the interrupt
3695  * after sending the MSI, so the driver doesn't have to do it.
3696  */
3697 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3698 {
3699         struct net_device *dev = dev_id;
3700         struct tg3 *tp = netdev_priv(dev);
3701
3702         prefetch(tp->hw_status);
3703         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3704
3705         if (likely(!tg3_irq_sync(tp)))
3706                 netif_rx_schedule(dev, &tp->napi);
3707
3708         return IRQ_HANDLED;
3709 }
3710
3711 /* MSI ISR - No need to check for interrupt sharing and no need to
3712  * flush status block and interrupt mailbox. PCI ordering rules
3713  * guarantee that MSI will arrive after the status block.
3714  */
3715 static irqreturn_t tg3_msi(int irq, void *dev_id)
3716 {
3717         struct net_device *dev = dev_id;
3718         struct tg3 *tp = netdev_priv(dev);
3719
3720         prefetch(tp->hw_status);
3721         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3722         /*
3723          * Writing any value to intr-mbox-0 clears PCI INTA# and
3724          * chip-internal interrupt pending events.
3725          * Writing non-zero to intr-mbox-0 additionally tells the
3726          * NIC to stop sending us irqs, engaging "in-intr-handler"
3727          * event coalescing.
3728          */
3729         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3730         if (likely(!tg3_irq_sync(tp)))
3731                 netif_rx_schedule(dev, &tp->napi);
3732
3733         return IRQ_RETVAL(1);
3734 }
3735
3736 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3737 {
3738         struct net_device *dev = dev_id;
3739         struct tg3 *tp = netdev_priv(dev);
3740         struct tg3_hw_status *sblk = tp->hw_status;
3741         unsigned int handled = 1;
3742
3743         /* In INTx mode, it is possible for the interrupt to arrive at
3744          * the CPU before the status block posted prior to it is visible.
3745          * Reading the PCI State register will confirm whether the
3746          * interrupt is ours and will flush the status block.
3747          */
3748         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3749                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3750                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3751                         handled = 0;
3752                         goto out;
3753                 }
3754         }
3755
3756         /*
3757          * Writing any value to intr-mbox-0 clears PCI INTA# and
3758          * chip-internal interrupt pending events.
3759          * Writing non-zero to intr-mbox-0 additionally tells the
3760          * NIC to stop sending us irqs, engaging "in-intr-handler"
3761          * event coalescing.
3762          *
3763          * Flush the mailbox to de-assert the IRQ immediately to prevent
3764          * spurious interrupts.  The flush impacts performance but
3765          * excessive spurious interrupts can be worse in some cases.
3766          */
3767         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3768         if (tg3_irq_sync(tp))
3769                 goto out;
3770         sblk->status &= ~SD_STATUS_UPDATED;
3771         if (likely(tg3_has_work(tp))) {
3772                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3773                 netif_rx_schedule(dev, &tp->napi);
3774         } else {
3775                 /* No work, shared interrupt perhaps?  re-enable
3776                  * interrupts, and flush that PCI write
3777                  */
3778                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3779                                0x00000000);
3780         }
3781 out:
3782         return IRQ_RETVAL(handled);
3783 }
3784
3785 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
3786 {
3787         struct net_device *dev = dev_id;
3788         struct tg3 *tp = netdev_priv(dev);
3789         struct tg3_hw_status *sblk = tp->hw_status;
3790         unsigned int handled = 1;
3791
3792         /* In INTx mode, it is possible for the interrupt to arrive at
3793          * the CPU before the status block posted prior to it is visible.
3794          * Reading the PCI State register will confirm whether the
3795          * interrupt is ours and will flush the status block.
3796          */
3797         if (unlikely(sblk->status_tag == tp->last_tag)) {
3798                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3799                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3800                         handled = 0;
3801                         goto out;
3802                 }
3803         }
3804
3805         /*
3806          * Writing any value to intr-mbox-0 clears PCI INTA# and
3807          * chip-internal interrupt pending events.
3808          * Writing non-zero to intr-mbox-0 additionally tells the
3809          * NIC to stop sending us irqs, engaging "in-intr-handler"
3810          * event coalescing.
3811          *
3812          * Flush the mailbox to de-assert the IRQ immediately to prevent
3813          * spurious interrupts.  The flush impacts performance but
3814          * excessive spurious interrupts can be worse in some cases.
3815          */
3816         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3817         if (tg3_irq_sync(tp))
3818                 goto out;
3819         if (netif_rx_schedule_prep(dev, &tp->napi)) {
3820                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3821                 /* Update last_tag to mark that this status has been
3822          * seen. Because the interrupt may be shared, we may be
3823                  * racing with tg3_poll(), so only update last_tag
3824                  * if tg3_poll() is not scheduled.
3825                  */
3826                 tp->last_tag = sblk->status_tag;
3827                 __netif_rx_schedule(dev, &tp->napi);
3828         }
3829 out:
3830         return IRQ_RETVAL(handled);
3831 }
3832
3833 /* ISR for interrupt test */
3834 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3835 {
3836         struct net_device *dev = dev_id;
3837         struct tg3 *tp = netdev_priv(dev);
3838         struct tg3_hw_status *sblk = tp->hw_status;
3839
3840         if ((sblk->status & SD_STATUS_UPDATED) ||
3841             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3842                 tg3_disable_ints(tp);
3843                 return IRQ_RETVAL(1);
3844         }
3845         return IRQ_RETVAL(0);
3846 }
3847
3848 static int tg3_init_hw(struct tg3 *, int);
3849 static int tg3_halt(struct tg3 *, int, int);
3850
3851 /* Restart hardware after configuration changes, self-test, etc.
3852  * Invoked with tp->lock held.
3853  */
3854 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3855 {
3856         int err;
3857
3858         err = tg3_init_hw(tp, reset_phy);
3859         if (err) {
3860                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3861                        "aborting.\n", tp->dev->name);
3862                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3863                 tg3_full_unlock(tp);
3864                 del_timer_sync(&tp->timer);
3865                 tp->irq_sync = 0;
3866                 napi_enable(&tp->napi);
3867                 dev_close(tp->dev);
3868                 tg3_full_lock(tp, 0);
3869         }
3870         return err;
3871 }
3872
3873 #ifdef CONFIG_NET_POLL_CONTROLLER
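/* Netpoll hook (e.g. netconsole): called when normal interrupt delivery
 * cannot be relied upon, so invoke the INTx interrupt handler directly
 * to process pending events.
 */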
3874 static void tg3_poll_controller(struct net_device *dev)
3875 {
3876         struct tg3 *tp = netdev_priv(dev);
3877
3878         tg3_interrupt(tp->pdev->irq, dev);
3879 }
3880 #endif
3881
3882 static void tg3_reset_task(struct work_struct *work)
3883 {
3884         struct tg3 *tp = container_of(work, struct tg3, reset_task);
3885         unsigned int restart_timer;
3886
3887         tg3_full_lock(tp, 0);
3888
3889         if (!netif_running(tp->dev)) {
3890                 tg3_full_unlock(tp);
3891                 return;
3892         }
3893
3894         tg3_full_unlock(tp);
3895
3896         tg3_netif_stop(tp);
3897
3898         tg3_full_lock(tp, 1);
3899
3900         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3901         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3902
3903         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3904                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3905                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3906                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3907                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3908         }
3909
3910         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3911         if (tg3_init_hw(tp, 1))
3912                 goto out;
3913
3914         tg3_netif_start(tp);
3915
3916         if (restart_timer)
3917                 mod_timer(&tp->timer, jiffies + 1);
3918
3919 out:
3920         tg3_full_unlock(tp);
3921 }
3922
3923 static void tg3_dump_short_state(struct tg3 *tp)
3924 {
3925         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3926                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3927         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3928                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3929 }
3930
3931 static void tg3_tx_timeout(struct net_device *dev)
3932 {
3933         struct tg3 *tp = netdev_priv(dev);
3934
3935         if (netif_msg_tx_err(tp)) {
3936                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3937                        dev->name);
3938                 tg3_dump_short_state(tp);
3939         }
3940
3941         schedule_work(&tp->reset_task);
3942 }
3943
3944 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
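/* The first test below is a quick filter: only a mapping whose low 32
 * bits fall within the last 0x2340 (9024) bytes below a 4GB boundary
 * can wrap at all (presumably sized to cover the largest jumbo buffer).
 * The second test detects the actual 32-bit wrap of base + len + 8.
 */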
3945 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3946 {
3947         u32 base = (u32) mapping & 0xffffffff;
3948
3949         return ((base > 0xffffdcc0) &&
3950                 (base + len + 8 < base));
3951 }
3952
3953 /* Test for DMA addresses > 40-bit */
3954 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3955                                           int len)
3956 {
3957 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3958         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3959                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3960         return 0;
3961 #else
3962         return 0;
3963 #endif
3964 }
3965
3966 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3967
3968 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3969 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3970                                        u32 last_plus_one, u32 *start,
3971                                        u32 base_flags, u32 mss)
3972 {
3973         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3974         dma_addr_t new_addr = 0;
3975         u32 entry = *start;
3976         int i, ret = 0;
3977
3978         if (!new_skb) {
3979                 ret = -1;
3980         } else {
3981                 /* New SKB is guaranteed to be linear. */
3982                 entry = *start;
3983                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3984                                           PCI_DMA_TODEVICE);
3985                 /* Make sure new skb does not cross any 4G boundaries.
3986                  * Drop the packet if it does.
3987                  */
3988                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3989                         ret = -1;
3990                         dev_kfree_skb(new_skb);
3991                         new_skb = NULL;
3992                 } else {
3993                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3994                                     base_flags, 1 | (mss << 1));
3995                         *start = NEXT_TX(entry);
3996                 }
3997         }
3998
3999         /* Now clean up the sw ring entries. */
4000         i = 0;
4001         while (entry != last_plus_one) {
4002                 int len;
4003
4004                 if (i == 0)
4005                         len = skb_headlen(skb);
4006                 else
4007                         len = skb_shinfo(skb)->frags[i-1].size;
4008                 pci_unmap_single(tp->pdev,
4009                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4010                                  len, PCI_DMA_TODEVICE);
4011                 if (i == 0) {
4012                         tp->tx_buffers[entry].skb = new_skb;
4013                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4014                 } else {
4015                         tp->tx_buffers[entry].skb = NULL;
4016                 }
4017                 entry = NEXT_TX(entry);
4018                 i++;
4019         }
4020
4021         dev_kfree_skb(skb);
4022
4023         return ret;
4024 }
4025
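/* Fill one TX descriptor: split the DMA address into high/low words,
 * pack the length and flags, and fold the MSS and VLAN tag into the
 * vlan_tag word.  mss_and_is_end carries the MSS in its upper bits and
 * an "is last fragment" flag in bit 0.
 */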
4026 static void tg3_set_txd(struct tg3 *tp, int entry,
4027                         dma_addr_t mapping, int len, u32 flags,
4028                         u32 mss_and_is_end)
4029 {
4030         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4031         int is_end = (mss_and_is_end & 0x1);
4032         u32 mss = (mss_and_is_end >> 1);
4033         u32 vlan_tag = 0;
4034
4035         if (is_end)
4036                 flags |= TXD_FLAG_END;
4037         if (flags & TXD_FLAG_VLAN) {
4038                 vlan_tag = flags >> 16;
4039                 flags &= 0xffff;
4040         }
4041         vlan_tag |= (mss << TXD_MSS_SHIFT);
4042
4043         txd->addr_hi = ((u64) mapping >> 32);
4044         txd->addr_lo = ((u64) mapping & 0xffffffff);
4045         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4046         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4047 }
4048
4049 /* hard_start_xmit for devices that don't have any bugs and
4050  * support TG3_FLG2_HW_TSO_2 only.
4051  */
4052 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4053 {
4054         struct tg3 *tp = netdev_priv(dev);
4055         dma_addr_t mapping;
4056         u32 len, entry, base_flags, mss;
4057
4058         len = skb_headlen(skb);
4059
4060         /* We are running in BH disabled context with netif_tx_lock
4061          * and TX reclaim runs via tp->napi.poll inside of a software
4062          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4063          * no IRQ context deadlocks to worry about either.  Rejoice!
4064          */
4065         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4066                 if (!netif_queue_stopped(dev)) {
4067                         netif_stop_queue(dev);
4068
4069                         /* This is a hard error, log it. */
4070                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4071                                "queue awake!\n", dev->name);
4072                 }
4073                 return NETDEV_TX_BUSY;
4074         }
4075
4076         entry = tp->tx_prod;
4077         base_flags = 0;
4078         mss = 0;
4079         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4080                 int tcp_opt_len, ip_tcp_len;
4081
4082                 if (skb_header_cloned(skb) &&
4083                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4084                         dev_kfree_skb(skb);
4085                         goto out_unlock;
4086                 }
4087
4088                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4089                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4090                 else {
4091                         struct iphdr *iph = ip_hdr(skb);
4092
4093                         tcp_opt_len = tcp_optlen(skb);
4094                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4095
4096                         iph->check = 0;
4097                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4098                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4099                 }
4100
4101                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4102                                TXD_FLAG_CPU_POST_DMA);
4103
4104                 tcp_hdr(skb)->check = 0;
4105
4106         }
4107         else if (skb->ip_summed == CHECKSUM_PARTIAL)
4108                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4109 #if TG3_VLAN_TAG_USED
4110         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4111                 base_flags |= (TXD_FLAG_VLAN |
4112                                (vlan_tx_tag_get(skb) << 16));
4113 #endif
4114
4115         /* Queue skb data, a.k.a. the main skb fragment. */
4116         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4117
4118         tp->tx_buffers[entry].skb = skb;
4119         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4120
4121         tg3_set_txd(tp, entry, mapping, len, base_flags,
4122                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4123
4124         entry = NEXT_TX(entry);
4125
4126         /* Now loop through additional data fragments, and queue them. */
4127         if (skb_shinfo(skb)->nr_frags > 0) {
4128                 unsigned int i, last;
4129
4130                 last = skb_shinfo(skb)->nr_frags - 1;
4131                 for (i = 0; i <= last; i++) {
4132                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4133
4134                         len = frag->size;
4135                         mapping = pci_map_page(tp->pdev,
4136                                                frag->page,
4137                                                frag->page_offset,
4138                                                len, PCI_DMA_TODEVICE);
4139
4140                         tp->tx_buffers[entry].skb = NULL;
4141                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4142
4143                         tg3_set_txd(tp, entry, mapping, len,
4144                                     base_flags, (i == last) | (mss << 1));
4145
4146                         entry = NEXT_TX(entry);
4147                 }
4148         }
4149
4150         /* Packets are ready, update Tx producer idx local and on card. */
4151         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4152
4153         tp->tx_prod = entry;
4154         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4155                 netif_stop_queue(dev);
4156                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4157                         netif_wake_queue(tp->dev);
4158         }
4159
4160 out_unlock:
4161         mmiowb();
4162
4163         dev->trans_start = jiffies;
4164
4165         return NETDEV_TX_OK;
4166 }
4167
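/* Editor's sketch (not driver code) of the queue flow control used at the
 * end of tg3_start_xmit() above.  "tx_avail", "max_descs_per_skb" and
 * "wake_thresh" stand in for tg3_tx_avail(tp), MAX_SKB_FRAGS + 1 and
 * TG3_TX_WAKEUP_THRESH(tp).  The point is the two-step pattern: stop the
 * queue as soon as one worst-case skb might no longer fit, then re-read
 * availability, since the reclaim path may have freed descriptors in the
 * meantime and the re-check is what keeps the queue from staying stopped.
 */
static void tx_flow_control_sketch(struct net_device *dev,
                                   u32 (*tx_avail)(void *ring), void *ring,
                                   u32 max_descs_per_skb, u32 wake_thresh)
{
        if (unlikely(tx_avail(ring) <= max_descs_per_skb)) {
                netif_stop_queue(dev);
                if (tx_avail(ring) > wake_thresh)
                        netif_wake_queue(dev);
        }
}
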
4168 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4169
4170 /* Use GSO to work around a rare TSO bug that may be triggered when the
4171  * TSO header is greater than 80 bytes.
4172  */
4173 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4174 {
4175         struct sk_buff *segs, *nskb;
4176
4177         /* Estimate the number of fragments in the worst case */
4178         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4179                 netif_stop_queue(tp->dev);
4180                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4181                         return NETDEV_TX_BUSY;
4182
4183                 netif_wake_queue(tp->dev);
4184         }
4185
4186         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4187         if (unlikely(IS_ERR(segs)))
4188                 goto tg3_tso_bug_end;
4189
4190         do {
4191                 nskb = segs;
4192                 segs = segs->next;
4193                 nskb->next = NULL;
4194                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4195         } while (segs);
4196
4197 tg3_tso_bug_end:
4198         dev_kfree_skb(skb);
4199
4200         return NETDEV_TX_OK;
4201 }
4202
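/* Editor's sketch: tg3_tso_bug() is only reached when the caller decides
 * the full L2 + L3 + L4 header of a TSO packet exceeds 80 bytes (see the
 * "(ETH_HLEN + hdr_len) > 80" test in tg3_start_xmit_dma_bug() below).
 * The helper restates that condition in isolation; it is an illustration,
 * not a function the driver defines.
 */
static inline int tso_header_too_long_sketch(const struct sk_buff *skb)
{
        int hdr_len = ip_hdrlen(skb) + sizeof(struct tcphdr) + tcp_optlen(skb);

        return (ETH_HLEN + hdr_len) > 80;
}
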
4203 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4204  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4205  */
4206 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4207 {
4208         struct tg3 *tp = netdev_priv(dev);
4209         dma_addr_t mapping;
4210         u32 len, entry, base_flags, mss;
4211         int would_hit_hwbug;
4212
4213         len = skb_headlen(skb);
4214
4215         /* We are running in BH disabled context with netif_tx_lock
4216          * and TX reclaim runs via tp->napi.poll inside of a software
4217          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4218          * no IRQ context deadlocks to worry about either.  Rejoice!
4219          */
4220         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4221                 if (!netif_queue_stopped(dev)) {
4222                         netif_stop_queue(dev);
4223
4224                         /* This is a hard error, log it. */
4225                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4226                                "queue awake!\n", dev->name);
4227                 }
4228                 return NETDEV_TX_BUSY;
4229         }
4230
4231         entry = tp->tx_prod;
4232         base_flags = 0;
4233         if (skb->ip_summed == CHECKSUM_PARTIAL)
4234                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4235         mss = 0;
4236         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4237                 struct iphdr *iph;
4238                 int tcp_opt_len, ip_tcp_len, hdr_len;
4239
4240                 if (skb_header_cloned(skb) &&
4241                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4242                         dev_kfree_skb(skb);
4243                         goto out_unlock;
4244                 }
4245
4246                 tcp_opt_len = tcp_optlen(skb);
4247                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4248
4249                 hdr_len = ip_tcp_len + tcp_opt_len;
4250                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4251                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4252                         return (tg3_tso_bug(tp, skb));
4253
4254                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4255                                TXD_FLAG_CPU_POST_DMA);
4256
4257                 iph = ip_hdr(skb);
4258                 iph->check = 0;
4259                 iph->tot_len = htons(mss + hdr_len);
4260                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4261                         tcp_hdr(skb)->check = 0;
4262                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4263                 } else
4264                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4265                                                                  iph->daddr, 0,
4266                                                                  IPPROTO_TCP,
4267                                                                  0);
4268
4269                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4270                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4271                         if (tcp_opt_len || iph->ihl > 5) {
4272                                 int tsflags;
4273
4274                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4275                                 mss |= (tsflags << 11);
4276                         }
4277                 } else {
4278                         if (tcp_opt_len || iph->ihl > 5) {
4279                                 int tsflags;
4280
4281                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4282                                 base_flags |= tsflags << 12;
4283                         }
4284                 }
4285         }
4286 #if TG3_VLAN_TAG_USED
4287         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4288                 base_flags |= (TXD_FLAG_VLAN |
4289                                (vlan_tx_tag_get(skb) << 16));
4290 #endif
4291
4292         /* Queue skb data, a.k.a. the main skb fragment. */
4293         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4294
4295         tp->tx_buffers[entry].skb = skb;
4296         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4297
4298         would_hit_hwbug = 0;
4299
4300         if (tg3_4g_overflow_test(mapping, len))
4301                 would_hit_hwbug = 1;
4302
4303         tg3_set_txd(tp, entry, mapping, len, base_flags,
4304                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4305
4306         entry = NEXT_TX(entry);
4307
4308         /* Now loop through additional data fragments, and queue them. */
4309         if (skb_shinfo(skb)->nr_frags > 0) {
4310                 unsigned int i, last;
4311
4312                 last = skb_shinfo(skb)->nr_frags - 1;
4313                 for (i = 0; i <= last; i++) {
4314                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4315
4316                         len = frag->size;
4317                         mapping = pci_map_page(tp->pdev,
4318                                                frag->page,
4319                                                frag->page_offset,
4320                                                len, PCI_DMA_TODEVICE);
4321
4322                         tp->tx_buffers[entry].skb = NULL;
4323                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4324
4325                         if (tg3_4g_overflow_test(mapping, len))
4326                                 would_hit_hwbug = 1;
4327
4328                         if (tg3_40bit_overflow_test(tp, mapping, len))
4329                                 would_hit_hwbug = 1;
4330
4331                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4332                                 tg3_set_txd(tp, entry, mapping, len,
4333                                             base_flags, (i == last)|(mss << 1));
4334                         else
4335                                 tg3_set_txd(tp, entry, mapping, len,
4336                                             base_flags, (i == last));
4337
4338                         entry = NEXT_TX(entry);
4339                 }
4340         }
4341
4342         if (would_hit_hwbug) {
4343                 u32 last_plus_one = entry;
4344                 u32 start;
4345
4346                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4347                 start &= (TG3_TX_RING_SIZE - 1);
4348
4349                 /* If the workaround fails due to memory/mapping
4350                  * failure, silently drop this packet.
4351                  */
4352                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4353                                                 &start, base_flags, mss))
4354                         goto out_unlock;
4355
4356                 entry = start;
4357         }
4358
4359         /* Packets are ready, update Tx producer idx local and on card. */
4360         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4361
4362         tp->tx_prod = entry;
4363         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4364                 netif_stop_queue(dev);
4365                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4366                         netif_wake_queue(tp->dev);
4367         }
4368
4369 out_unlock:
4370         mmiowb();
4371
4372         dev->trans_start = jiffies;
4373
4374         return NETDEV_TX_OK;
4375 }
4376
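/* Editor's sketch, NOT the driver's helpers: tg3_4g_overflow_test() and
 * tg3_40bit_overflow_test() are defined elsewhere in this file, so the two
 * checks below are only a plausible reconstruction of the idea they test
 * for -- a DMA mapping that straddles a 4 GB boundary, or that reaches
 * past a 40-bit address space on affected chips, must be routed through
 * tigon3_dma_hwbug_workaround() instead of being handed straight to the
 * NIC.
 */
static inline int crosses_4g_boundary_sketch(u64 mapping, u32 len)
{
        return (mapping >> 32) != ((mapping + len - 1) >> 32);
}

static inline int exceeds_40bit_sketch(u64 mapping, u32 len)
{
        return (mapping + len - 1) > 0xffffffffffULL;   /* beyond 2^40 - 1 */
}
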
4377 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4378                                int new_mtu)
4379 {
4380         dev->mtu = new_mtu;
4381
4382         if (new_mtu > ETH_DATA_LEN) {
4383                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4384                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4385                         ethtool_op_set_tso(dev, 0);
4386                 }
4387                 else
4388                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4389         } else {
4390                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4391                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4392                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4393         }
4394 }
4395
4396 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4397 {
4398         struct tg3 *tp = netdev_priv(dev);
4399         int err;
4400
4401         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4402                 return -EINVAL;
4403
4404         if (!netif_running(dev)) {
4405                 /* We'll just catch it later when the
4406                  * device is up'd.
4407                  */
4408                 tg3_set_mtu(dev, tp, new_mtu);
4409                 return 0;
4410         }
4411
4412         tg3_netif_stop(tp);
4413
4414         tg3_full_lock(tp, 1);
4415
4416         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4417
4418         tg3_set_mtu(dev, tp, new_mtu);
4419
4420         err = tg3_restart_hw(tp, 0);
4421
4422         if (!err)
4423                 tg3_netif_start(tp);
4424
4425         tg3_full_unlock(tp);
4426
4427         return err;
4428 }
4429
4430 /* Free up pending packets in all rx/tx rings.
4431  *
4432  * The chip has been shut down and the driver detached from
4433  * the networking, so no interrupts or new tx packets will
4434  * end up in the driver.  tp->{tx,}lock is not held and we are not
4435  * in an interrupt context and thus may sleep.
4436  */
4437 static void tg3_free_rings(struct tg3 *tp)
4438 {
4439         struct ring_info *rxp;
4440         int i;
4441
4442         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4443                 rxp = &tp->rx_std_buffers[i];
4444
4445                 if (rxp->skb == NULL)
4446                         continue;
4447                 pci_unmap_single(tp->pdev,
4448                                  pci_unmap_addr(rxp, mapping),
4449                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4450                                  PCI_DMA_FROMDEVICE);
4451                 dev_kfree_skb_any(rxp->skb);
4452                 rxp->skb = NULL;
4453         }
4454
4455         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4456                 rxp = &tp->rx_jumbo_buffers[i];
4457
4458                 if (rxp->skb == NULL)
4459                         continue;
4460                 pci_unmap_single(tp->pdev,
4461                                  pci_unmap_addr(rxp, mapping),
4462                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4463                                  PCI_DMA_FROMDEVICE);
4464                 dev_kfree_skb_any(rxp->skb);
4465                 rxp->skb = NULL;
4466         }
4467
4468         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4469                 struct tx_ring_info *txp;
4470                 struct sk_buff *skb;
4471                 int j;
4472
4473                 txp = &tp->tx_buffers[i];
4474                 skb = txp->skb;
4475
4476                 if (skb == NULL) {
4477                         i++;
4478                         continue;
4479                 }
4480
4481                 pci_unmap_single(tp->pdev,
4482                                  pci_unmap_addr(txp, mapping),
4483                                  skb_headlen(skb),
4484                                  PCI_DMA_TODEVICE);
4485                 txp->skb = NULL;
4486
4487                 i++;
4488
4489                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4490                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4491                         pci_unmap_page(tp->pdev,
4492                                        pci_unmap_addr(txp, mapping),
4493                                        skb_shinfo(skb)->frags[j].size,
4494                                        PCI_DMA_TODEVICE);
4495                         i++;
4496                 }
4497
4498                 dev_kfree_skb_any(skb);
4499         }
4500 }
4501
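/* Editor's aside: the fragment unmap loop above indexes the tx bookkeeping
 * array with "i & (TG3_TX_RING_SIZE - 1)".  That is the usual
 * power-of-two ring wrap -- equivalent to a modulo, but compiled down to a
 * single AND.  A minimal sketch of the equivalence (valid only when
 * ring_size is a power of two):
 */
static inline u32 ring_wrap_sketch(u32 i, u32 ring_size)
{
        return i & (ring_size - 1);     /* == i % ring_size */
}
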
4502 /* Initialize tx/rx rings for packet processing.
4503  *
4504  * The chip has been shut down and the driver detached from
4505  * the networking stack, so no interrupts or new tx packets will
4506  * end up in the driver.  tp->{tx,}lock are held and thus
4507  * we may not sleep.
4508  */
4509 static int tg3_init_rings(struct tg3 *tp)
4510 {
4511         u32 i;
4512
4513         /* Free up all the SKBs. */
4514         tg3_free_rings(tp);
4515
4516         /* Zero out all descriptors. */
4517         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4518         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4519         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4520         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4521
4522         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4523         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4524             (tp->dev->mtu > ETH_DATA_LEN))
4525                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4526
4527         /* Initialize invariants of the rings, we only set this
4528          * stuff once.  This works because the card does not
4529          * write into the rx buffer posting rings.
4530          */
4531         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4532                 struct tg3_rx_buffer_desc *rxd;
4533
4534                 rxd = &tp->rx_std[i];
4535                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4536                         << RXD_LEN_SHIFT;
4537                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4538                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4539                                (i << RXD_OPAQUE_INDEX_SHIFT));
4540         }
4541
4542         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4543                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4544                         struct tg3_rx_buffer_desc *rxd;
4545
4546                         rxd = &tp->rx_jumbo[i];
4547                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4548                                 << RXD_LEN_SHIFT;
4549                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4550                                 RXD_FLAG_JUMBO;
4551                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4552                                (i << RXD_OPAQUE_INDEX_SHIFT));
4553                 }
4554         }
4555
4556         /* Now allocate fresh SKBs for each rx ring. */
4557         for (i = 0; i < tp->rx_pending; i++) {
4558                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4559                         printk(KERN_WARNING PFX
4560                                "%s: Using a smaller RX standard ring, "
4561                                "only %d out of %d buffers were allocated "
4562                                "successfully.\n",
4563                                tp->dev->name, i, tp->rx_pending);
4564                         if (i == 0)
4565                                 return -ENOMEM;
4566                         tp->rx_pending = i;
4567                         break;
4568                 }
4569         }
4570
4571         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4572                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4573                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4574                                              -1, i) < 0) {
4575                                 printk(KERN_WARNING PFX
4576                                        "%s: Using a smaller RX jumbo ring, "
4577                                        "only %d out of %d buffers were "
4578                                        "allocated successfully.\n",
4579                                        tp->dev->name, i, tp->rx_jumbo_pending);
4580                                 if (i == 0) {
4581                                         tg3_free_rings(tp);
4582                                         return -ENOMEM;
4583                                 }
4584                                 tp->rx_jumbo_pending = i;
4585                                 break;
4586                         }
4587                 }
4588         }
4589         return 0;
4590 }
4591
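/* Editor's sketch: the "opaque" cookie written into each rx producer
 * descriptor above is just a ring identifier OR'd with the slot index.
 * The rx completion path (not part of this excerpt) reads the cookie back
 * from the hardware to locate the matching ring_info entry.  index_mask
 * below is a stand-in parameter -- the driver keeps its own RXD_OPAQUE_*
 * mask definitions in tg3.h.
 */
static inline u32 rxd_opaque_encode_sketch(u32 ring_id, u32 index)
{
        return ring_id | (index << RXD_OPAQUE_INDEX_SHIFT);
}

static inline u32 rxd_opaque_index_sketch(u32 opaque, u32 index_mask)
{
        return (opaque >> RXD_OPAQUE_INDEX_SHIFT) & index_mask;
}
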
4592 /*
4593  * Must not be invoked with interrupt sources disabled and
4594  * the hardware shut down.
4595  */
4596 static void tg3_free_consistent(struct tg3 *tp)
4597 {
4598         kfree(tp->rx_std_buffers);
4599         tp->rx_std_buffers = NULL;
4600         if (tp->rx_std) {
4601                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4602                                     tp->rx_std, tp->rx_std_mapping);
4603                 tp->rx_std = NULL;
4604         }
4605         if (tp->rx_jumbo) {
4606                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4607                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4608                 tp->rx_jumbo = NULL;
4609         }
4610         if (tp->rx_rcb) {
4611                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4612                                     tp->rx_rcb, tp->rx_rcb_mapping);
4613                 tp->rx_rcb = NULL;
4614         }
4615         if (tp->tx_ring) {
4616                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4617                         tp->tx_ring, tp->tx_desc_mapping);
4618                 tp->tx_ring = NULL;
4619         }
4620         if (tp->hw_status) {
4621                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4622                                     tp->hw_status, tp->status_mapping);
4623                 tp->hw_status = NULL;
4624         }
4625         if (tp->hw_stats) {
4626                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4627                                     tp->hw_stats, tp->stats_mapping);
4628                 tp->hw_stats = NULL;
4629         }
4630 }
4631
4632 /*
4633  * Must not be invoked with interrupt sources disabled and
4634  * the hardware shut down.  Can sleep.
4635  */
4636 static int tg3_alloc_consistent(struct tg3 *tp)
4637 {
4638         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4639                                       (TG3_RX_RING_SIZE +
4640                                        TG3_RX_JUMBO_RING_SIZE)) +
4641                                      (sizeof(struct tx_ring_info) *
4642                                       TG3_TX_RING_SIZE),
4643                                      GFP_KERNEL);
4644         if (!tp->rx_std_buffers)
4645                 return -ENOMEM;
4646
4647         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4648         tp->tx_buffers = (struct tx_ring_info *)
4649                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4650
4651         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4652                                           &tp->rx_std_mapping);
4653         if (!tp->rx_std)
4654                 goto err_out;
4655
4656         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4657                                             &tp->rx_jumbo_mapping);
4658
4659         if (!tp->rx_jumbo)
4660                 goto err_out;
4661
4662         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4663                                           &tp->rx_rcb_mapping);
4664         if (!tp->rx_rcb)
4665                 goto err_out;
4666
4667         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4668                                            &tp->tx_desc_mapping);
4669         if (!tp->tx_ring)
4670                 goto err_out;
4671
4672         tp->hw_status = pci_alloc_consistent(tp->pdev,
4673                                              TG3_HW_STATUS_SIZE,
4674                                              &tp->status_mapping);
4675         if (!tp->hw_status)
4676                 goto err_out;
4677
4678         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4679                                             sizeof(struct tg3_hw_stats),
4680                                             &tp->stats_mapping);
4681         if (!tp->hw_stats)
4682                 goto err_out;
4683
4684         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4685         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4686
4687         return 0;
4688
4689 err_out:
4690         tg3_free_consistent(tp);
4691         return -ENOMEM;
4692 }
4693
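/* Editor's sketch of the single-allocation carving at the top of
 * tg3_alloc_consistent(): one kzalloc() covers the standard rx, jumbo rx
 * and tx bookkeeping arrays, and the latter two are simply offsets into
 * that block, so there is one failure point here and one kfree() in
 * tg3_free_consistent().  entry_a/entry_b below are made-up stand-ins for
 * ring_info and tx_ring_info; this is an illustration, not driver code.
 */
struct entry_a { void *buf; };
struct entry_b { void *buf; };

static int carve_bookkeeping_sketch(struct entry_a **a, size_t na,
                                    struct entry_b **b, size_t nb)
{
        void *blob = kzalloc(na * sizeof(**a) + nb * sizeof(**b), GFP_KERNEL);

        if (!blob)
                return -ENOMEM;

        *a = blob;                              /* first na entries        */
        *b = (struct entry_b *)(*a + na);       /* remainder of the block  */
        return 0;
}
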
4694 #define MAX_WAIT_CNT 1000
4695
4696 /* To stop a block, clear the enable bit and poll till it
4697  * clears.  tp->lock is held.
4698  */
4699 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4700 {
4701         unsigned int i;
4702         u32 val;
4703
4704         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4705                 switch (ofs) {
4706                 case RCVLSC_MODE:
4707                 case DMAC_MODE:
4708                 case MBFREE_MODE:
4709                 case BUFMGR_MODE:
4710                 case MEMARB_MODE:
4711                         /* We can't enable/disable these bits of the
4712                          * 5705/5750, just say success.
4713                          */
4714                         return 0;
4715
4716                 default:
4717                         break;
4718                 };
4719         }
4720
4721         val = tr32(ofs);
4722         val &= ~enable_bit;
4723         tw32_f(ofs, val);
4724
4725         for (i = 0; i < MAX_WAIT_CNT; i++) {
4726                 udelay(100);
4727                 val = tr32(ofs);
4728                 if ((val & enable_bit) == 0)
4729                         break;
4730         }
4731
4732         if (i == MAX_WAIT_CNT && !silent) {
4733                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4734                        "ofs=%lx enable_bit=%x\n",
4735                        ofs, enable_bit);
4736                 return -ENODEV;
4737         }
4738
4739         return 0;
4740 }
4741
4742 /* tp->lock is held. */
4743 static int tg3_abort_hw(struct tg3 *tp, int silent)
4744 {
4745         int i, err;
4746
4747         tg3_disable_ints(tp);
4748
4749         tp->rx_mode &= ~RX_MODE_ENABLE;
4750         tw32_f(MAC_RX_MODE, tp->rx_mode);
4751         udelay(10);
4752
4753         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4754         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4755         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4756         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4757         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4758         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4759
4760         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4761         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4762         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4763         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4764         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4765         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4766         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4767
4768         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4769         tw32_f(MAC_MODE, tp->mac_mode);
4770         udelay(40);
4771
4772         tp->tx_mode &= ~TX_MODE_ENABLE;
4773         tw32_f(MAC_TX_MODE, tp->tx_mode);
4774
4775         for (i = 0; i < MAX_WAIT_CNT; i++) {
4776                 udelay(100);
4777                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4778                         break;
4779         }
4780         if (i >= MAX_WAIT_CNT) {
4781                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4782                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4783                        tp->dev->name, tr32(MAC_TX_MODE));
4784                 err |= -ENODEV;
4785         }
4786
4787         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4788         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4789         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4790
4791         tw32(FTQ_RESET, 0xffffffff);
4792         tw32(FTQ_RESET, 0x00000000);
4793
4794         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4795         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4796
4797         if (tp->hw_status)
4798                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4799         if (tp->hw_stats)
4800                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4801
4802         return err;
4803 }
4804
4805 /* tp->lock is held. */
4806 static int tg3_nvram_lock(struct tg3 *tp)
4807 {
4808         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4809                 int i;
4810
4811                 if (tp->nvram_lock_cnt == 0) {
4812                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4813                         for (i = 0; i < 8000; i++) {
4814                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4815                                         break;
4816                                 udelay(20);
4817                         }
4818                         if (i == 8000) {
4819                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4820                                 return -ENODEV;
4821                         }
4822                 }
4823                 tp->nvram_lock_cnt++;
4824         }
4825         return 0;
4826 }
4827
4828 /* tp->lock is held. */
4829 static void tg3_nvram_unlock(struct tg3 *tp)
4830 {
4831         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4832                 if (tp->nvram_lock_cnt > 0)
4833                         tp->nvram_lock_cnt--;
4834                 if (tp->nvram_lock_cnt == 0)
4835                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4836         }
4837 }
4838
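/* Editor's sketch of the intended nvram lock usage (not new driver code).
 * The SWARB request/grant bits form a hardware semaphore shared with the
 * firmware; the nvram_lock_cnt counter above turns it into a nesting lock,
 * so helpers called while the grant is already held can lock/unlock again
 * and only the outermost unlock releases the hardware.
 */
static int nvram_op_sketch(struct tg3 *tp)
{
        int err = tg3_nvram_lock(tp);   /* acquire the SWARB grant (or nest) */

        if (err)
                return err;

        /* ... NVRAM accesses go here while the grant is held ... */

        tg3_nvram_unlock(tp);           /* released for real at count == 0 */
        return 0;
}
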
4839 /* tp->lock is held. */
4840 static void tg3_enable_nvram_access(struct tg3 *tp)
4841 {
4842         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4843             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4844                 u32 nvaccess = tr32(NVRAM_ACCESS);
4845
4846                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4847         }
4848 }
4849
4850 /* tp->lock is held. */
4851 static void tg3_disable_nvram_access(struct tg3 *tp)
4852 {
4853         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4854             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4855                 u32 nvaccess = tr32(NVRAM_ACCESS);
4856
4857                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4858         }
4859 }
4860
4861 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
4862 {
4863         int i;
4864         u32 apedata;
4865
4866         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
4867         if (apedata != APE_SEG_SIG_MAGIC)
4868                 return;
4869
4870         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
4871         if (apedata != APE_FW_STATUS_READY)
4872                 return;
4873
4874         /* Wait for up to 1 millisecond for APE to service previous event. */
4875         for (i = 0; i < 10; i++) {
4876                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
4877                         return;
4878
4879                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
4880
4881                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4882                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
4883                                         event | APE_EVENT_STATUS_EVENT_PENDING);
4884
4885                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
4886
4887                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4888                         break;
4889
4890                 udelay(100);
4891         }
4892
4893         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4894                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
4895 }
4896
4897 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4898 {
4899         u32 event;
4900         u32 apedata;
4901
4902         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4903                 return;
4904
4905         switch (kind) {
4906                 case RESET_KIND_INIT:
4907                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4908                                         APE_HOST_SEG_SIG_MAGIC);
4909                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4910                                         APE_HOST_SEG_LEN_MAGIC);
4911                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4912                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4913                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4914                                         APE_HOST_DRIVER_ID_MAGIC);
4915                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4916                                         APE_HOST_BEHAV_NO_PHYLOCK);
4917
4918                         event = APE_EVENT_STATUS_STATE_START;
4919                         break;
4920                 case RESET_KIND_SHUTDOWN:
4921                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4922                         break;
4923                 case RESET_KIND_SUSPEND:
4924                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4925                         break;
4926                 default:
4927                         return;
4928         }
4929
4930         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4931
4932         tg3_ape_send_event(tp, event);
4933 }
4934
4935 /* tp->lock is held. */
4936 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4937 {
4938         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4939                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4940
4941         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4942                 switch (kind) {
4943                 case RESET_KIND_INIT:
4944                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4945                                       DRV_STATE_START);
4946                         break;
4947
4948                 case RESET_KIND_SHUTDOWN:
4949                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4950                                       DRV_STATE_UNLOAD);
4951                         break;
4952
4953                 case RESET_KIND_SUSPEND:
4954                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4955                                       DRV_STATE_SUSPEND);
4956                         break;
4957
4958                 default:
4959                         break;
4960                 };
4961         }
4962
4963         if (kind == RESET_KIND_INIT ||
4964             kind == RESET_KIND_SUSPEND)
4965                 tg3_ape_driver_state_change(tp, kind);
4966 }
4967
4968 /* tp->lock is held. */
4969 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4970 {
4971         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4972                 switch (kind) {
4973                 case RESET_KIND_INIT:
4974                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4975                                       DRV_STATE_START_DONE);
4976                         break;
4977
4978                 case RESET_KIND_SHUTDOWN:
4979                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4980                                       DRV_STATE_UNLOAD_DONE);
4981                         break;
4982
4983                 default:
4984                         break;
4985                 };
4986         }
4987
4988         if (kind == RESET_KIND_SHUTDOWN)
4989                 tg3_ape_driver_state_change(tp, kind);
4990 }
4991
4992 /* tp->lock is held. */
4993 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4994 {
4995         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4996                 switch (kind) {
4997                 case RESET_KIND_INIT:
4998                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4999                                       DRV_STATE_START);
5000                         break;
5001
5002                 case RESET_KIND_SHUTDOWN:
5003                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5004                                       DRV_STATE_UNLOAD);
5005                         break;
5006
5007                 case RESET_KIND_SUSPEND:
5008                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5009                                       DRV_STATE_SUSPEND);
5010                         break;
5011
5012                 default:
5013                         break;
5014                 };
5015         }
5016 }
5017
5018 static int tg3_poll_fw(struct tg3 *tp)
5019 {
5020         int i;
5021         u32 val;
5022
5023         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5024                 /* Wait up to 20ms for init done. */
5025                 for (i = 0; i < 200; i++) {
5026                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5027                                 return 0;
5028                         udelay(100);
5029                 }
5030                 return -ENODEV;
5031         }
5032
5033         /* Wait for firmware initialization to complete. */
5034         for (i = 0; i < 100000; i++) {
5035                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5036                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5037                         break;
5038                 udelay(10);
5039         }
5040
5041         /* Chip might not be fitted with firmware.  Some Sun onboard
5042          * parts are configured like that.  So don't signal the timeout
5043          * of the above loop as an error, but do report the lack of
5044          * running firmware once.
5045          */
5046         if (i >= 100000 &&
5047             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5048                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5049
5050                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5051                        tp->dev->name);
5052         }
5053
5054         return 0;
5055 }
5056
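/* Editor's sketch of the boot handshake polled above: the driver seeds the
 * firmware mailbox with NIC_SRAM_FIRMWARE_MBOX_MAGIC1 before the reset
 * (see tg3_write_sig_pre_reset() earlier) and the bootcode is expected to
 * replace it with the bitwise complement once initialization is done,
 * which is exactly the value the polling loop waits for.
 */
static inline int fw_boot_complete_sketch(u32 mbox_val, u32 magic)
{
        return mbox_val == ~magic;      /* magic == NIC_SRAM_FIRMWARE_MBOX_MAGIC1 */
}
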
5057 /* Save PCI command register before chip reset */
5058 static void tg3_save_pci_state(struct tg3 *tp)
5059 {
5060         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5061 }
5062
5063 /* Restore PCI state after chip reset */
5064 static void tg3_restore_pci_state(struct tg3 *tp)
5065 {
5066         u32 val;
5067
5068         /* Re-enable indirect register accesses. */
5069         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5070                                tp->misc_host_ctrl);
5071
5072         /* Set MAX PCI retry to zero. */
5073         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5074         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5075             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5076                 val |= PCISTATE_RETRY_SAME_DMA;
5077         /* Allow reads and writes to the APE register and memory space. */
5078         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5079                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5080                        PCISTATE_ALLOW_APE_SHMEM_WR;
5081         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5082
5083         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5084
5085         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5086                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5087                                       tp->pci_cacheline_sz);
5088                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5089                                       tp->pci_lat_timer);
5090         }
5091         /* Make sure PCI-X relaxed ordering bit is clear. */
5092         if (tp->pcix_cap) {
5093                 u16 pcix_cmd;
5094
5095                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5096                                      &pcix_cmd);
5097                 pcix_cmd &= ~PCI_X_CMD_ERO;
5098                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5099                                       pcix_cmd);
5100         }
5101
5102         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5103
5104                 /* Chip reset on 5780 will reset MSI enable bit,
5105                  * so we need to restore it.
5106                  */
5107                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5108                         u16 ctrl;
5109
5110                         pci_read_config_word(tp->pdev,
5111                                              tp->msi_cap + PCI_MSI_FLAGS,
5112                                              &ctrl);
5113                         pci_write_config_word(tp->pdev,
5114                                               tp->msi_cap + PCI_MSI_FLAGS,
5115                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5116                         val = tr32(MSGINT_MODE);
5117                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5118                 }
5119         }
5120 }
5121
5122 static void tg3_stop_fw(struct tg3 *);
5123
5124 /* tp->lock is held. */
5125 static int tg3_chip_reset(struct tg3 *tp)
5126 {
5127         u32 val;
5128         void (*write_op)(struct tg3 *, u32, u32);
5129         int err;
5130
5131         tg3_nvram_lock(tp);
5132
5133         /* No matching tg3_nvram_unlock() after this because
5134          * chip reset below will undo the nvram lock.
5135          */
5136         tp->nvram_lock_cnt = 0;
5137
5138         /* GRC_MISC_CFG core clock reset will clear the memory
5139          * enable bit in PCI register 4 and the MSI enable bit
5140          * on some chips, so we save relevant registers here.
5141          */
5142         tg3_save_pci_state(tp);
5143
5144         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5145             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5146             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5147             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5148             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5149                 tw32(GRC_FASTBOOT_PC, 0);
5150
5151         /*
5152          * We must avoid the readl() that normally takes place.
5153          * It locks machines, causes machine checks, and other
5154          * fun things.  So, temporarily disable the 5701
5155          * hardware workaround, while we do the reset.
5156          */
5157         write_op = tp->write32;
5158         if (write_op == tg3_write_flush_reg32)
5159                 tp->write32 = tg3_write32;
5160
5161         /* Prevent the irq handler from reading or writing PCI registers
5162          * during chip reset when the memory enable bit in the PCI command
5163          * register may be cleared.  The chip does not generate interrupt
5164          * at this time, but the irq handler may still be called due to irq
5165          * sharing or irqpoll.
5166          */
5167         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5168         if (tp->hw_status) {
5169                 tp->hw_status->status = 0;
5170                 tp->hw_status->status_tag = 0;
5171         }
5172         tp->last_tag = 0;
5173         smp_mb();
5174         synchronize_irq(tp->pdev->irq);
5175
5176         /* do the reset */
5177         val = GRC_MISC_CFG_CORECLK_RESET;
5178
5179         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5180                 if (tr32(0x7e2c) == 0x60) {
5181                         tw32(0x7e2c, 0x20);
5182                 }
5183                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5184                         tw32(GRC_MISC_CFG, (1 << 29));
5185                         val |= (1 << 29);
5186                 }
5187         }
5188
5189         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5190                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5191                 tw32(GRC_VCPU_EXT_CTRL,
5192                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5193         }
5194
5195         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5196                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5197         tw32(GRC_MISC_CFG, val);
5198
5199         /* restore 5701 hardware bug workaround write method */
5200         tp->write32 = write_op;
5201
5202         /* Unfortunately, we have to delay before the PCI read back.
5203  * Some 575X chips will not even respond to a PCI cfg access
5204          * when the reset command is given to the chip.
5205          *
5206          * How do these hardware designers expect things to work
5207          * properly if the PCI write is posted for a long period
5208          * of time?  It is always necessary to have some method by
5209          * which a register read back can occur to push the write
5210          * out which does the reset.
5211          *
5212          * For most tg3 variants the trick below was working.
5213          * Ho hum...
5214          */
5215         udelay(120);
5216
5217         /* Flush PCI posted writes.  The normal MMIO registers
5218          * are inaccessible at this time so this is the only
5219  * way to do this reliably (actually, this is no longer
5220          * the case, see above).  I tried to use indirect
5221          * register read/write but this upset some 5701 variants.
5222          */
5223         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5224
5225         udelay(120);
5226
5227         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5228                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5229                         int i;
5230                         u32 cfg_val;
5231
5232                         /* Wait for link training to complete.  */
5233                         for (i = 0; i < 5000; i++)
5234                                 udelay(100);
5235
5236                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5237                         pci_write_config_dword(tp->pdev, 0xc4,
5238                                                cfg_val | (1 << 15));
5239                 }
5240                 /* Set PCIE max payload size and clear error status.  */
5241                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5242         }
5243
5244         tg3_restore_pci_state(tp);
5245
5246         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5247
5248         val = 0;
5249         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5250                 val = tr32(MEMARB_MODE);
5251         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5252
5253         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5254                 tg3_stop_fw(tp);
5255                 tw32(0x5000, 0x400);
5256         }
5257
5258         tw32(GRC_MODE, tp->grc_mode);
5259
5260         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5261                 val = tr32(0xc4);
5262
5263                 tw32(0xc4, val | (1 << 15));
5264         }
5265
5266         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5267             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5268                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5269                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5270                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5271                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5272         }
5273
5274         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5275                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5276                 tw32_f(MAC_MODE, tp->mac_mode);
5277         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5278                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5279                 tw32_f(MAC_MODE, tp->mac_mode);
5280         } else
5281                 tw32_f(MAC_MODE, 0);
5282         udelay(40);
5283
5284         err = tg3_poll_fw(tp);
5285         if (err)
5286                 return err;
5287
5288         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5289             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5290                 val = tr32(0x7c00);
5291
5292                 tw32(0x7c00, val | (1 << 25));
5293         }
5294
5295         /* Reprobe ASF enable state.  */
5296         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5297         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5298         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5299         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5300                 u32 nic_cfg;
5301
5302                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5303                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5304                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5305                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5306                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5307                 }
5308         }
5309
5310         return 0;
5311 }
5312
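/* Editor's sketch of the write-op swap done inside tg3_chip_reset() above.
 * Register writes go through the tp->write32 function pointer, so the
 * flushing (read-back) variant can be parked for the one window where a
 * readl() could hang the machine and restored immediately afterwards.
 * regs_sketch/posted_write are stand-in names, not driver types.
 */
struct regs_sketch {
        void (*write32)(struct regs_sketch *r, u32 off, u32 val);
};

static void core_reset_sketch(struct regs_sketch *r,
                              void (*posted_write)(struct regs_sketch *, u32, u32),
                              u32 reset_reg, u32 reset_val)
{
        void (*saved)(struct regs_sketch *, u32, u32) = r->write32;

        r->write32 = posted_write;      /* no read-back flush during reset */
        r->write32(r, reset_reg, reset_val);
        r->write32 = saved;             /* back to the flushing variant */
}
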
5313 /* tp->lock is held. */
5314 static void tg3_stop_fw(struct tg3 *tp)
5315 {
5316         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5317            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5318                 u32 val;
5319                 int i;
5320
5321                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5322                 val = tr32(GRC_RX_CPU_EVENT);
5323                 val |= (1 << 14);
5324                 tw32(GRC_RX_CPU_EVENT, val);
5325
5326                 /* Wait for RX cpu to ACK the event.  */
5327                 for (i = 0; i < 100; i++) {
5328                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5329                                 break;
5330                         udelay(1);
5331                 }
5332         }
5333 }
5334
5335 /* tp->lock is held. */
5336 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5337 {
5338         int err;
5339
5340         tg3_stop_fw(tp);
5341
5342         tg3_write_sig_pre_reset(tp, kind);
5343
5344         tg3_abort_hw(tp, silent);
5345         err = tg3_chip_reset(tp);
5346
5347         tg3_write_sig_legacy(tp, kind);
5348         tg3_write_sig_post_reset(tp, kind);
5349
5350         if (err)
5351                 return err;
5352
5353         return 0;
5354 }
5355
5356 #define TG3_FW_RELEASE_MAJOR    0x0
5357 #define TG3_FW_RELASE_MINOR     0x0
5358 #define TG3_FW_RELEASE_FIX      0x0
5359 #define TG3_FW_START_ADDR       0x08000000
5360 #define TG3_FW_TEXT_ADDR        0x08000000
5361 #define TG3_FW_TEXT_LEN         0x9c0
5362 #define TG3_FW_RODATA_ADDR      0x080009c0
5363 #define TG3_FW_RODATA_LEN       0x60
5364 #define TG3_FW_DATA_ADDR        0x08000a40
5365 #define TG3_FW_DATA_LEN         0x20
5366 #define TG3_FW_SBSS_ADDR        0x08000a60
5367 #define TG3_FW_SBSS_LEN         0xc
5368 #define TG3_FW_BSS_ADDR         0x08000a70
5369 #define TG3_FW_BSS_LEN          0x10
5370
5371 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5372         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5373         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5374         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5375         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5376         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5377         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5378         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5379         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5380         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5381         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5382         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5383         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5384         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5385         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5386         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5387         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5388         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5389         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5390         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5391         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5392         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5393         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5394         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5395         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5396         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5397         0, 0, 0, 0, 0, 0,
5398         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5399         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5400         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5401         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5402         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5403         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5404         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5405         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5406         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5407         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5408         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5409         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5410         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5411         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5412         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5413         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5414         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5415         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5416         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5417         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5418         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5419         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5420         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5421         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5422         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5423         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5424         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5425         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5426         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5427         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5428         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5429         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5430         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5431         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5432         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5433         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5434         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5435         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5436         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5437         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5438         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5439         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5440         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5441         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5442         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5443         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5444         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5445         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5446         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5447         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5448         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5449         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5450         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5451         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5452         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5453         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5454         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5455         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5456         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5457         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5458         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5459         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5460         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5461         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5462         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5463 };
5464
5465 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5466         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5467         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5468         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5469         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5470         0x00000000
5471 };
5472
5473 #if 0 /* All zeros, don't eat up space with it. */
5474 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5475         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5476         0x00000000, 0x00000000, 0x00000000, 0x00000000
5477 };
5478 #endif
5479
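     /* On-chip SRAM regions used below as scratch space when loading
      * firmware into the RX and TX embedded CPUs.  The 5705 and newer
      * parts have no TX CPU available for this (see the BUG_ON in
      * tg3_halt_cpu()); their TSO firmware is staged in the mbuf pool
      * region instead (see tg3_load_tso_firmware() further below).
      */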
5480 #define RX_CPU_SCRATCH_BASE     0x30000
5481 #define RX_CPU_SCRATCH_SIZE     0x04000
5482 #define TX_CPU_SCRATCH_BASE     0x34000
5483 #define TX_CPU_SCRATCH_SIZE     0x04000
5484
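     /* Halt the RX or TX embedded CPU selected by @offset.  The handshake
      * is: write all-ones to the CPU state register, request CPU_MODE_HALT,
      * and poll until the mode register reports that the halt took effect
      * (up to 10000 attempts).  The 5906 is a special case and is halted
      * through GRC_VCPU_EXT_CTRL instead.
      */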
5485 /* tp->lock is held. */
5486 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5487 {
5488         int i;
5489
5490         BUG_ON(offset == TX_CPU_BASE &&
5491             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5492
5493         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5494                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5495
5496                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5497                 return 0;
5498         }
5499         if (offset == RX_CPU_BASE) {
5500                 for (i = 0; i < 10000; i++) {
5501                         tw32(offset + CPU_STATE, 0xffffffff);
5502                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5503                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5504                                 break;
5505                 }
5506
5507                 tw32(offset + CPU_STATE, 0xffffffff);
5508                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5509                 udelay(10);
5510         } else {
5511                 for (i = 0; i < 10000; i++) {
5512                         tw32(offset + CPU_STATE, 0xffffffff);
5513                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5514                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5515                                 break;
5516                 }
5517         }
5518
5519         if (i >= 10000) {
5520                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
5521                        "%s CPU\n",
5522                        tp->dev->name,
5523                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5524                 return -ENODEV;
5525         }
5526
5527         /* Clear firmware's nvram arbitration. */
5528         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5529                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5530         return 0;
5531 }
5532
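     /* Description of one firmware image, split into text, rodata and data
      * sections.  Each section carries its link-time load address, its
      * length in bytes and a pointer to the image words; a NULL pointer
      * means the section is all zeros (as with the #if 0'd tg3FwData
      * above).
      */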
5533 struct fw_info {
5534         unsigned int text_base;
5535         unsigned int text_len;
5536         const u32 *text_data;
5537         unsigned int rodata_base;
5538         unsigned int rodata_len;
5539         const u32 *rodata_data;
5540         unsigned int data_base;
5541         unsigned int data_len;
5542         const u32 *data_data;
5543 };
5544
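     /* Copy a firmware image into a CPU's scratch memory.  The NVRAM lock
      * is taken around the halt in case the boot code is still running,
      * the scratch area is cleared, and each section is then written at
      * (scratch base + low 16 bits of its link address).  5705 and newer
      * parts use tg3_write_mem(); older parts go through
      * tg3_write_indirect_reg32().
      */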
5545 /* tp->lock is held. */
5546 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5547                                  int cpu_scratch_size, struct fw_info *info)
5548 {
5549         int err, lock_err, i;
5550         void (*write_op)(struct tg3 *, u32, u32);
5551
5552         if (cpu_base == TX_CPU_BASE &&
5553             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5554                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5555                        "TX CPU firmware on %s, which is a 5705 or later chip.\n",
5556                        tp->dev->name);
5557                 return -EINVAL;
5558         }
5559
5560         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5561                 write_op = tg3_write_mem;
5562         else
5563                 write_op = tg3_write_indirect_reg32;
5564
5565         /* It is possible that the bootcode is still loading at this point.
5566          * Acquire the NVRAM lock before halting the CPU.
5567          */
5568         lock_err = tg3_nvram_lock(tp);
5569         err = tg3_halt_cpu(tp, cpu_base);
5570         if (!lock_err)
5571                 tg3_nvram_unlock(tp);
5572         if (err)
5573                 goto out;
5574
5575         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5576                 write_op(tp, cpu_scratch_base + i, 0);
5577         tw32(cpu_base + CPU_STATE, 0xffffffff);
5578         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5579         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5580                 write_op(tp, (cpu_scratch_base +
5581                               (info->text_base & 0xffff) +
5582                               (i * sizeof(u32))),
5583                          (info->text_data ?
5584                           info->text_data[i] : 0));
5585         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5586                 write_op(tp, (cpu_scratch_base +
5587                               (info->rodata_base & 0xffff) +
5588                               (i * sizeof(u32))),
5589                          (info->rodata_data ?
5590                           info->rodata_data[i] : 0));
5591         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5592                 write_op(tp, (cpu_scratch_base +
5593                               (info->data_base & 0xffff) +
5594                               (i * sizeof(u32))),
5595                          (info->data_data ?
5596                           info->data_data[i] : 0));
5597
5598         err = 0;
5599
5600 out:
5601         return err;
5602 }
5603
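     /* Load the firmware fix needed by 5701 A0 silicon into both the RX
      * and TX CPU scratch areas, then start only the RX CPU: its program
      * counter is pointed at TG3_FW_TEXT_ADDR and re-checked up to five
      * times (1 ms apart) before giving up with -ENODEV.
      */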
5604 /* tp->lock is held. */
5605 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5606 {
5607         struct fw_info info;
5608         int err, i;
5609
5610         info.text_base = TG3_FW_TEXT_ADDR;
5611         info.text_len = TG3_FW_TEXT_LEN;
5612         info.text_data = &tg3FwText[0];
5613         info.rodata_base = TG3_FW_RODATA_ADDR;
5614         info.rodata_len = TG3_FW_RODATA_LEN;
5615         info.rodata_data = &tg3FwRodata[0];
5616         info.data_base = TG3_FW_DATA_ADDR;
5617         info.data_len = TG3_FW_DATA_LEN;
5618         info.data_data = NULL;
5619
5620         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5621                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5622                                     &info);
5623         if (err)
5624                 return err;
5625
5626         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5627                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5628                                     &info);
5629         if (err)
5630                 return err;
5631
5632         /* Now start up only the RX CPU. */
5633         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5634         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5635
5636         for (i = 0; i < 5; i++) {
5637                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5638                         break;
5639                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5640                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5641                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5642                 udelay(1000);
5643         }
5644         if (i >= 5) {
5645                 printk(KERN_ERR PFX "tg3_load_firmware failed to set RX CPU PC "
5646                        "for %s: is %08x, should be %08x\n",
5647                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5648                        TG3_FW_TEXT_ADDR);
5649                 return -ENODEV;
5650         }
5651         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5652         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5653
5654         return 0;
5655 }
5656
5657
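     /* Layout of the TSO firmware image run on the TX CPU of parts that
      * lack hardware TSO (the 5705 gets its own image further below).
      * The addresses are link-time section locations; only their low
      * 16 bits matter when the image is copied into scratch memory.
      */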
5658 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5659 #define TG3_TSO_FW_RELEASE_MINOR        0x6
5660 #define TG3_TSO_FW_RELEASE_FIX          0x0
5661 #define TG3_TSO_FW_START_ADDR           0x08000000
5662 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5663 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5664 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5665 #define TG3_TSO_FW_RODATA_LEN           0x60
5666 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5667 #define TG3_TSO_FW_DATA_LEN             0x30
5668 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5669 #define TG3_TSO_FW_SBSS_LEN             0x2c
5670 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5671 #define TG3_TSO_FW_BSS_LEN              0x894
5672
5673 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5674         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5675         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5676         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5677         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5678         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5679         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5680         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5681         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5682         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5683         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5684         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5685         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5686         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5687         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5688         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5689         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5690         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5691         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5692         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5693         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5694         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5695         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5696         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5697         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5698         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5699         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5700         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5701         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5702         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5703         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5704         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5705         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5706         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5707         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5708         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5709         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5710         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5711         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5712         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5713         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5714         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5715         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5716         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5717         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5718         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5719         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5720         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5721         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5722         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5723         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5724         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5725         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5726         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5727         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5728         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5729         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5730         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5731         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5732         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5733         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5734         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5735         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5736         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5737         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5738         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5739         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5740         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5741         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5742         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5743         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5744         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5745         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5746         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5747         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5748         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5749         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5750         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5751         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5752         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5753         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5754         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5755         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5756         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5757         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5758         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5759         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5760         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5761         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5762         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5763         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5764         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5765         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5766         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5767         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5768         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5769         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5770         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5771         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5772         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5773         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5774         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5775         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5776         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5777         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5778         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5779         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5780         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5781         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5782         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5783         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5784         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5785         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5786         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5787         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5788         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5789         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5790         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5791         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5792         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5793         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5794         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5795         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5796         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5797         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5798         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5799         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5800         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5801         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5802         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5803         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5804         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5805         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5806         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5807         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5808         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5809         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5810         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5811         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5812         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5813         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5814         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5815         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5816         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5817         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5818         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5819         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5820         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5821         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5822         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5823         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5824         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5825         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5826         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5827         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5828         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5829         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5830         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5831         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5832         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5833         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5834         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5835         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5836         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5837         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5838         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5839         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5840         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5841         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5842         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5843         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5844         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5845         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5846         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5847         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5848         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5849         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5850         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5851         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5852         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5853         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5854         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5855         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5856         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5857         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5858         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5859         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5860         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5861         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5862         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5863         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5864         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5865         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5866         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5867         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5868         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5869         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5870         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5871         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5872         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5873         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5874         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5875         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5876         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5877         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5878         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5879         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5880         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5881         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5882         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5883         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5884         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5885         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5886         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5887         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5888         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5889         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5890         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5891         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5892         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5893         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5894         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5895         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5896         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5897         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5898         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5899         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5900         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5901         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5902         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5903         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5904         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5905         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5906         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5907         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5908         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5909         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5910         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5911         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5912         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5913         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5914         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5915         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5916         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5917         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5918         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5919         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5920         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5921         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5922         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5923         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5924         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5925         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5926         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5927         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5928         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5929         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5930         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5931         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5932         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5933         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5934         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5935         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5936         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5937         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5938         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5939         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5940         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5941         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5942         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5943         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5944         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5945         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5946         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5947         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5948         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5949         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5950         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5951         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5952         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5953         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5954         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5955         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5956         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5957         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5958 };
5959
5960 static const u32 tg3TsoFwRodata[] = {
5961         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5962         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5963         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5964         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5965         0x00000000,
5966 };
5967
5968 static const u32 tg3TsoFwData[] = {
5969         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5970         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5971         0x00000000,
5972 };
5973
5974 /* 5705 needs a special version of the TSO firmware.  */
5975 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5976 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5977 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5978 #define TG3_TSO5_FW_START_ADDR          0x00010000
5979 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5980 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5981 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5982 #define TG3_TSO5_FW_RODATA_LEN          0x50
5983 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5984 #define TG3_TSO5_FW_DATA_LEN            0x20
5985 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5986 #define TG3_TSO5_FW_SBSS_LEN            0x28
5987 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5988 #define TG3_TSO5_FW_BSS_LEN             0x88
5989
5990 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5991         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5992         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5993         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5994         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5995         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5996         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5997         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5998         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5999         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6000         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6001         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6002         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6003         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6004         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6005         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6006         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6007         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6008         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6009         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6010         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6011         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6012         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6013         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6014         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6015         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6016         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6017         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6018         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6019         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6020         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6021         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6022         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6023         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6024         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6025         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6026         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6027         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6028         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6029         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6030         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6031         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6032         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6033         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6034         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6035         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6036         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6037         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6038         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6039         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6040         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6041         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6042         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6043         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6044         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6045         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6046         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6047         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6048         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6049         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6050         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6051         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6052         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6053         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6054         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6055         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6056         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6057         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6058         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6059         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6060         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6061         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6062         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6063         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6064         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6065         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6066         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6067         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6068         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6069         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6070         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6071         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6072         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6073         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6074         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6075         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6076         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6077         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6078         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6079         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6080         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6081         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6082         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6083         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6084         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6085         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6086         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6087         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6088         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6089         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6090         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6091         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6092         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6093         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6094         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6095         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6096         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6097         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6098         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6099         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6100         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6101         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6102         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6103         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6104         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6105         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6106         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6107         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6108         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6109         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6110         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6111         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6112         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6113         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6114         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6115         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6116         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6117         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6118         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6119         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6120         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6121         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6122         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6123         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6124         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6125         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6126         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6127         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6128         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6129         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6130         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6131         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6132         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6133         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6134         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6135         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6136         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6137         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6138         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6139         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6140         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6141         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6142         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6143         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6144         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6145         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6146         0x00000000, 0x00000000, 0x00000000,
6147 };
6148
6149 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6150         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6151         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6152         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6153         0x00000000, 0x00000000, 0x00000000,
6154 };
6155
6156 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6157         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6158         0x00000000, 0x00000000, 0x00000000,
6159 };
6160
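     /* Load the appropriate TSO firmware, unless the chip handles TSO in
      * hardware.  On the 5705 the image runs on the RX CPU and is staged
      * in the mbuf pool region; all other supported chips use the TX CPU
      * and its normal scratch window.  After loading, the CPU is started
      * by pointing its PC at the text base and taking it out of halt.
      */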
6161 /* tp->lock is held. */
6162 static int tg3_load_tso_firmware(struct tg3 *tp)
6163 {
6164         struct fw_info info;
6165         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6166         int err, i;
6167
6168         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6169                 return 0;
6170
6171         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6172                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6173                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6174                 info.text_data = &tg3Tso5FwText[0];
6175                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6176                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6177                 info.rodata_data = &tg3Tso5FwRodata[0];
6178                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6179                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6180                 info.data_data = &tg3Tso5FwData[0];
6181                 cpu_base = RX_CPU_BASE;
6182                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6183                 cpu_scratch_size = (info.text_len +
6184                                     info.rodata_len +
6185                                     info.data_len +
6186                                     TG3_TSO5_FW_SBSS_LEN +
6187                                     TG3_TSO5_FW_BSS_LEN);
6188         } else {
6189                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6190                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6191                 info.text_data = &tg3TsoFwText[0];
6192                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6193                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6194                 info.rodata_data = &tg3TsoFwRodata[0];
6195                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6196                 info.data_len = TG3_TSO_FW_DATA_LEN;
6197                 info.data_data = &tg3TsoFwData[0];
6198                 cpu_base = TX_CPU_BASE;
6199                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6200                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6201         }
6202
6203         err = tg3_load_firmware_cpu(tp, cpu_base,
6204                                     cpu_scratch_base, cpu_scratch_size,
6205                                     &info);
6206         if (err)
6207                 return err;
6208
6209         /* Now start up the CPU. */
6210         tw32(cpu_base + CPU_STATE, 0xffffffff);
6211         tw32_f(cpu_base + CPU_PC,    info.text_base);
6212
6213         for (i = 0; i < 5; i++) {
6214                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6215                         break;
6216                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6217                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6218                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6219                 udelay(1000);
6220         }
6221         if (i >= 5) {
6222                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set CPU PC "
6223                        "for %s: is %08x, should be %08x\n",
6224                        tp->dev->name, tr32(cpu_base + CPU_PC),
6225                        info.text_base);
6226                 return -ENODEV;
6227         }
6228         tw32(cpu_base + CPU_STATE, 0xffffffff);
6229         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6230         return 0;
6231 }
6232
6233
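     /* Program the station address into the MAC address registers: the
      * first two bytes go into the high word and the remaining four into
      * the low word.  The same value is written to all four MAC_ADDR
      * slots (slot 1 is optionally skipped when ASF owns it) and, on the
      * 5703/5704, to the twelve extended slots as well.  The byte sum of
      * the address also seeds the TX backoff generator.
      */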
6234 /* tp->lock is held. */
6235 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6236 {
6237         u32 addr_high, addr_low;
6238         int i;
6239
6240         addr_high = ((tp->dev->dev_addr[0] << 8) |
6241                      tp->dev->dev_addr[1]);
6242         addr_low = ((tp->dev->dev_addr[2] << 24) |
6243                     (tp->dev->dev_addr[3] << 16) |
6244                     (tp->dev->dev_addr[4] <<  8) |
6245                     (tp->dev->dev_addr[5] <<  0));
6246         for (i = 0; i < 4; i++) {
6247                 if (i == 1 && skip_mac_1)
6248                         continue;
6249                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6250                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6251         }
6252
6253         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6254             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6255                 for (i = 0; i < 12; i++) {
6256                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6257                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6258                 }
6259         }
6260
6261         addr_high = (tp->dev->dev_addr[0] +
6262                      tp->dev->dev_addr[1] +
6263                      tp->dev->dev_addr[2] +
6264                      tp->dev->dev_addr[3] +
6265                      tp->dev->dev_addr[4] +
6266                      tp->dev->dev_addr[5]) &
6267                 TX_BACKOFF_SEED_MASK;
6268         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6269 }
6270
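     /* Handler for changing the interface MAC address at runtime.  When
      * ASF firmware is active, MAC address slot 1 is left untouched if it
      * is non-zero and differs from slot 0, on the assumption that ASF is
      * still using it.
      */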
6271 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6272 {
6273         struct tg3 *tp = netdev_priv(dev);
6274         struct sockaddr *addr = p;
6275         int err = 0, skip_mac_1 = 0;
6276
6277         if (!is_valid_ether_addr(addr->sa_data))
6278                 return -EINVAL;
6279
6280         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6281
6282         if (!netif_running(dev))
6283                 return 0;
6284
6285         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6286                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6287
6288                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6289                 addr0_low = tr32(MAC_ADDR_0_LOW);
6290                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6291                 addr1_low = tr32(MAC_ADDR_1_LOW);
6292
6293                 /* Skip MAC addr 1 if ASF is using it. */
6294                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6295                     !(addr1_high == 0 && addr1_low == 0))
6296                         skip_mac_1 = 1;
6297         }
6298         spin_lock_bh(&tp->lock);
6299         __tg3_set_mac_addr(tp, skip_mac_1);
6300         spin_unlock_bh(&tp->lock);
6301
6302         return err;
6303 }
6304
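     /* Write one buffer-descriptor info block into NIC memory: the 64-bit
      * host DMA address (split into high and low words), the combined
      * maxlen/flags word and, on pre-5705 parts only, the NIC-local ring
      * address.
      */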
6305 /* tp->lock is held. */
6306 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6307                            dma_addr_t mapping, u32 maxlen_flags,
6308                            u32 nic_addr)
6309 {
6310         tg3_write_mem(tp,
6311                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6312                       ((u64) mapping >> 32));
6313         tg3_write_mem(tp,
6314                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6315                       ((u64) mapping & 0xffffffff));
6316         tg3_write_mem(tp,
6317                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6318                        maxlen_flags);
6319
6320         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6321                 tg3_write_mem(tp,
6322                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6323                               nic_addr);
6324 }
6325
6326 static void __tg3_set_rx_mode(struct net_device *);
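     /* Program the host coalescing engine from the ethtool coalesce
      * parameters.  On 5705 and newer chips the per-IRQ tick registers and
      * the statistics coalescing tick are not programmed (those chips do
      * not DMA statistics to the host; see tg3_periodic_fetch_stats), and
      * on older chips the statistics tick is zeroed while the link is down.
      */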
6327 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6328 {
6329         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6330         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6331         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6332         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6333         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6334                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6335                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6336         }
6337         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6338         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6339         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6340                 u32 val = ec->stats_block_coalesce_usecs;
6341
6342                 if (!netif_carrier_ok(tp->dev))
6343                         val = 0;
6344
6345                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6346         }
6347 }
6348
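     /* Reset the chip and reprogram every block needed for packet
      * processing: buffer manager, rings, DMA engines, MAC, host
      * coalescing, receive rules and (optionally) the PHY.  Firmware
      * fixups are reloaded where required.  Interrupts are left disabled
      * for the caller to re-enable.
      */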
6349 /* tp->lock is held. */
6350 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6351 {
6352         u32 val, rdmac_mode;
6353         int i, err, limit;
6354
6355         tg3_disable_ints(tp);
6356
6357         tg3_stop_fw(tp);
6358
6359         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6360
6361         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6362                 tg3_abort_hw(tp, 1);
6363         }
6364
6365         if (reset_phy)
6366                 tg3_phy_reset(tp);
6367
6368         err = tg3_chip_reset(tp);
6369         if (err)
6370                 return err;
6371
6372         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6373
6374         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6375                 val = tr32(TG3_CPMU_CTRL);
6376                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6377                 tw32(TG3_CPMU_CTRL, val);
6378
6379                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6380                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6381                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6382                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6383
6384                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6385                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6386                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6387                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6388
6389                 val = tr32(TG3_CPMU_HST_ACC);
6390                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6391                 val |= CPMU_HST_ACC_MACCLK_6_25;
6392                 tw32(TG3_CPMU_HST_ACC, val);
6393         }
6394
6395         /* This works around an issue with Athlon chipsets on
6396          * B3 tigon3 silicon.  This bit has no effect on any
6397          * other revision.  But do not set this on PCI Express
6398          * chips and don't even touch the clocks if the CPMU is present.
6399          */
6400         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6401                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6402                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6403                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6404         }
6405
6406         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6407             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6408                 val = tr32(TG3PCI_PCISTATE);
6409                 val |= PCISTATE_RETRY_SAME_DMA;
6410                 tw32(TG3PCI_PCISTATE, val);
6411         }
6412
6413         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6414                 /* Allow reads and writes to the
6415                  * APE register and memory space.
6416                  */
6417                 val = tr32(TG3PCI_PCISTATE);
6418                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6419                        PCISTATE_ALLOW_APE_SHMEM_WR;
6420                 tw32(TG3PCI_PCISTATE, val);
6421         }
6422
6423         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6424                 /* Enable some hw fixes.  */
6425                 val = tr32(TG3PCI_MSI_DATA);
6426                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6427                 tw32(TG3PCI_MSI_DATA, val);
6428         }
6429
6430         /* Descriptor ring init may make accesses to the
6431          * NIC SRAM area to setup the TX descriptors, so we
6432          * can only do this after the hardware has been
6433          * successfully reset.
6434          */
6435         err = tg3_init_rings(tp);
6436         if (err)
6437                 return err;
6438
6439         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6440             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6441                 /* This value is determined during the probe-time DMA
6442                  * engine test, tg3_test_dma.
6443                  */
6444                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6445         }
6446
6447         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6448                           GRC_MODE_4X_NIC_SEND_RINGS |
6449                           GRC_MODE_NO_TX_PHDR_CSUM |
6450                           GRC_MODE_NO_RX_PHDR_CSUM);
6451         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6452
6453         /* The pseudo-header checksum is done by hardware logic and not
6454          * the offload processors, so make the chip do the pseudo-
6455          * header checksums on receive.  For transmit it is more
6456          * convenient to do the pseudo-header checksum in software,
6457          * as Linux already does that for us on transmit in all cases.
6458          */
6459         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6460
6461         tw32(GRC_MODE,
6462              tp->grc_mode |
6463              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6464
6465         /* Set up the timer prescaler register.  The core clock is always
              * 66 MHz; a prescaler value of 65 presumably yields a 1 usec tick
              * for the coalescing timers.
              */
6466         val = tr32(GRC_MISC_CFG);
6467         val &= ~0xff;
6468         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6469         tw32(GRC_MISC_CFG, val);
6470
6471         /* Initialize MBUF/DESC pool. */
6472         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6473                 /* Do nothing.  */
6474         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6475                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6476                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6477                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6478                 else
6479                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6480                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6481                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6482         }
6483         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6484                 int fw_len;
6485
6486                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6487                           TG3_TSO5_FW_RODATA_LEN +
6488                           TG3_TSO5_FW_DATA_LEN +
6489                           TG3_TSO5_FW_SBSS_LEN +
6490                           TG3_TSO5_FW_BSS_LEN);
6491                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6492                 tw32(BUFMGR_MB_POOL_ADDR,
6493                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6494                 tw32(BUFMGR_MB_POOL_SIZE,
6495                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6496         }
6497
6498         if (tp->dev->mtu <= ETH_DATA_LEN) {
6499                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6500                      tp->bufmgr_config.mbuf_read_dma_low_water);
6501                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6502                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6503                 tw32(BUFMGR_MB_HIGH_WATER,
6504                      tp->bufmgr_config.mbuf_high_water);
6505         } else {
6506                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6507                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6508                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6509                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6510                 tw32(BUFMGR_MB_HIGH_WATER,
6511                      tp->bufmgr_config.mbuf_high_water_jumbo);
6512         }
6513         tw32(BUFMGR_DMA_LOW_WATER,
6514              tp->bufmgr_config.dma_low_water);
6515         tw32(BUFMGR_DMA_HIGH_WATER,
6516              tp->bufmgr_config.dma_high_water);
6517
6518         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6519         for (i = 0; i < 2000; i++) {
6520                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6521                         break;
6522                 udelay(10);
6523         }
6524         if (i >= 2000) {
6525                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6526                        tp->dev->name);
6527                 return -ENODEV;
6528         }
6529
6530         /* Setup replenish threshold. */
6531         val = tp->rx_pending / 8;
6532         if (val == 0)
6533                 val = 1;
6534         else if (val > tp->rx_std_max_post)
6535                 val = tp->rx_std_max_post;
6536         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6537                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6538                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6539
6540                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6541                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6542         }
6543
6544         tw32(RCVBDI_STD_THRESH, val);
6545
6546         /* Initialize TG3_BDINFO's at:
6547          *  RCVDBDI_STD_BD:     standard eth size rx ring
6548          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6549          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6550          *
6551          * like so:
6552          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6553          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6554          *                              ring attribute flags
6555          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6556          *
6557          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6558          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6559          *
6560          * The size of each ring is fixed in the firmware, but the location is
6561          * configurable.
6562          */
6563         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6564              ((u64) tp->rx_std_mapping >> 32));
6565         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6566              ((u64) tp->rx_std_mapping & 0xffffffff));
6567         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6568              NIC_SRAM_RX_BUFFER_DESC);
6569
6570         /* Don't even try to program the JUMBO/MINI buffer descriptor
6571          * configs on 5705.
6572          */
6573         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6574                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6575                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6576         } else {
6577                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6578                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6579
6580                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6581                      BDINFO_FLAGS_DISABLED);
6582
6583                 /* Setup replenish threshold. */
6584                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6585
6586                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6587                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6588                              ((u64) tp->rx_jumbo_mapping >> 32));
6589                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6590                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6591                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6592                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6593                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6594                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6595                 } else {
6596                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6597                              BDINFO_FLAGS_DISABLED);
6598                 }
6599
6600         }
6601
6602         /* There is only one send ring on 5705/5750, no need to explicitly
6603          * disable the others.
6604          */
6605         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6606                 /* Clear out send RCB ring in SRAM. */
6607                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6608                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6609                                       BDINFO_FLAGS_DISABLED);
6610         }
6611
6612         tp->tx_prod = 0;
6613         tp->tx_cons = 0;
6614         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6615         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6616
6617         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6618                        tp->tx_desc_mapping,
6619                        (TG3_TX_RING_SIZE <<
6620                         BDINFO_FLAGS_MAXLEN_SHIFT),
6621                        NIC_SRAM_TX_BUFFER_DESC);
6622
6623         /* There is only one receive return ring on 5705/5750, no need
6624          * to explicitly disable the others.
6625          */
6626         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6627                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6628                      i += TG3_BDINFO_SIZE) {
6629                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6630                                       BDINFO_FLAGS_DISABLED);
6631                 }
6632         }
6633
6634         tp->rx_rcb_ptr = 0;
6635         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6636
6637         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6638                        tp->rx_rcb_mapping,
6639                        (TG3_RX_RCB_RING_SIZE(tp) <<
6640                         BDINFO_FLAGS_MAXLEN_SHIFT),
6641                        0);
6642
6643         tp->rx_std_ptr = tp->rx_pending;
6644         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6645                      tp->rx_std_ptr);
6646
6647         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6648                                                 tp->rx_jumbo_pending : 0;
6649         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6650                      tp->rx_jumbo_ptr);
6651
6652         /* Initialize MAC address and backoff seed. */
6653         __tg3_set_mac_addr(tp, 0);
6654
6655         /* MTU + ethernet header + FCS + optional VLAN tag */
6656         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6657
6658         /* The slot time is changed by tg3_setup_phy if we
6659          * run at gigabit with half duplex.
6660          */
6661         tw32(MAC_TX_LENGTHS,
6662              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6663              (6 << TX_LENGTHS_IPG_SHIFT) |
6664              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6665
6666         /* Receive rules. */
6667         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6668         tw32(RCVLPC_CONFIG, 0x0181);
6669
6670         /* Calculate the RDMAC_MODE setting early; we need it to determine
6671          * the RCVLPC_STATS_ENABLE mask below.
6672          */
6673         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6674                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6675                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6676                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6677                       RDMAC_MODE_LNGREAD_ENAB);
6678
6679         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6680                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6681                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6682                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6683
6684         /* If statement applies to 5705 and 5750 PCI devices only */
6685         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6686              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6687             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6688                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6689                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6690                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6691                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6692                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6693                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6694                 }
6695         }
6696
6697         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6698                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6699
6700         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6701                 rdmac_mode |= (1 << 27);
6702
6703         /* Receive/send statistics. */
6704         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6705                 val = tr32(RCVLPC_STATS_ENABLE);
6706                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6707                 tw32(RCVLPC_STATS_ENABLE, val);
6708         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6709                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6710                 val = tr32(RCVLPC_STATS_ENABLE);
6711                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6712                 tw32(RCVLPC_STATS_ENABLE, val);
6713         } else {
6714                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6715         }
6716         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6717         tw32(SNDDATAI_STATSENAB, 0xffffff);
6718         tw32(SNDDATAI_STATSCTRL,
6719              (SNDDATAI_SCTRL_ENABLE |
6720               SNDDATAI_SCTRL_FASTUPD));
6721
6722         /* Setup host coalescing engine. */
6723         tw32(HOSTCC_MODE, 0);
6724         for (i = 0; i < 2000; i++) {
6725                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6726                         break;
6727                 udelay(10);
6728         }
6729
6730         __tg3_set_coalesce(tp, &tp->coal);
6731
6732         /* set status block DMA address */
6733         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6734              ((u64) tp->status_mapping >> 32));
6735         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6736              ((u64) tp->status_mapping & 0xffffffff));
6737
6738         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6739                 /* Status/statistics block address.  See tg3_timer,
6740                  * the tg3_periodic_fetch_stats call there, and
6741                  * tg3_get_stats to see how this works for 5705/5750 chips.
6742                  */
6743                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6744                      ((u64) tp->stats_mapping >> 32));
6745                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6746                      ((u64) tp->stats_mapping & 0xffffffff));
6747                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6748                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6749         }
6750
6751         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6752
6753         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6754         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6755         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6756                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6757
6758         /* Clear statistics/status block in chip, and status block in ram. */
6759         for (i = NIC_SRAM_STATS_BLK;
6760              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6761              i += sizeof(u32)) {
6762                 tg3_write_mem(tp, i, 0);
6763                 udelay(40);
6764         }
6765         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6766
6767         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6768                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6769                 /* reset to prevent losing 1st rx packet intermittently */
6770                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6771                 udelay(10);
6772         }
6773
6774         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6775                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6776         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6777             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6778             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6779                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6780         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6781         udelay(40);
6782
6783         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6784          * If TG3_FLG2_IS_NIC is zero, we should read the
6785          * register to preserve the GPIO settings for LOMs. The GPIOs,
6786          * whether used as inputs or outputs, are set by boot code after
6787          * reset.
6788          */
6789         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6790                 u32 gpio_mask;
6791
6792                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6793                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6794                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6795
6796                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6797                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6798                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6799
6800                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6801                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6802
6803                 tp->grc_local_ctrl &= ~gpio_mask;
6804                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6805
6806                 /* GPIO1 must be driven high for eeprom write protect */
6807                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6808                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6809                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6810         }
6811         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6812         udelay(100);
6813
6814         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6815         tp->last_tag = 0;
6816
6817         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6818                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6819                 udelay(40);
6820         }
6821
6822         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6823                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6824                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6825                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6826                WDMAC_MODE_LNGREAD_ENAB);
6827
6828         /* If statement applies to 5705 and 5750 PCI devices only */
6829         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6830              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6831             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6832                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6833                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6834                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6835                         /* nothing */
6836                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6837                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6838                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6839                         val |= WDMAC_MODE_RX_ACCEL;
6840                 }
6841         }
6842
6843         /* Enable host coalescing bug fix */
6844         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6845             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6846             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6847             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6848                 val |= (1 << 29);
6849
6850         tw32_f(WDMAC_MODE, val);
6851         udelay(40);
6852
6853         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6854                 u16 pcix_cmd;
6855
6856                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6857                                      &pcix_cmd);
6858                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6859                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6860                         pcix_cmd |= PCI_X_CMD_READ_2K;
6861                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6862                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6863                         pcix_cmd |= PCI_X_CMD_READ_2K;
6864                 }
6865                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6866                                       pcix_cmd);
6867         }
6868
6869         tw32_f(RDMAC_MODE, rdmac_mode);
6870         udelay(40);
6871
6872         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6873         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6874                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6875
6876         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6877                 tw32(SNDDATAC_MODE,
6878                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6879         else
6880                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6881
6882         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6883         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6884         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6885         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6886         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6887                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6888         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6889         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6890
6891         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6892                 err = tg3_load_5701_a0_firmware_fix(tp);
6893                 if (err)
6894                         return err;
6895         }
6896
6897         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6898                 err = tg3_load_tso_firmware(tp);
6899                 if (err)
6900                         return err;
6901         }
6902
6903         tp->tx_mode = TX_MODE_ENABLE;
6904         tw32_f(MAC_TX_MODE, tp->tx_mode);
6905         udelay(100);
6906
6907         tp->rx_mode = RX_MODE_ENABLE;
6908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6909             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6910                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6911
6912         tw32_f(MAC_RX_MODE, tp->rx_mode);
6913         udelay(10);
6914
6915         if (tp->link_config.phy_is_low_power) {
6916                 tp->link_config.phy_is_low_power = 0;
6917                 tp->link_config.speed = tp->link_config.orig_speed;
6918                 tp->link_config.duplex = tp->link_config.orig_duplex;
6919                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6920         }
6921
6922         tp->mi_mode = MAC_MI_MODE_BASE;
6923         tw32_f(MAC_MI_MODE, tp->mi_mode);
6924         udelay(80);
6925
6926         tw32(MAC_LED_CTRL, tp->led_ctrl);
6927
6928         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6929         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6930                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6931                 udelay(10);
6932         }
6933         tw32_f(MAC_RX_MODE, tp->rx_mode);
6934         udelay(10);
6935
6936         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6937                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6938                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6939                         /* Set drive transmission level to 1.2V  */
6940                         /* only if the signal pre-emphasis bit is not set  */
6941                         val = tr32(MAC_SERDES_CFG);
6942                         val &= 0xfffff000;
6943                         val |= 0x880;
6944                         tw32(MAC_SERDES_CFG, val);
6945                 }
6946                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6947                         tw32(MAC_SERDES_CFG, 0x616000);
6948         }
6949
6950         /* Prevent chip from dropping frames when flow control
6951          * is enabled.
6952          */
6953         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6954
6955         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6956             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6957                 /* Use hardware link auto-negotiation */
6958                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6959         }
6960
6961         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6962             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6963                 u32 tmp;
6964
6965                 tmp = tr32(SERDES_RX_CTRL);
6966                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6967                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6968                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6969                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6970         }
6971
6972         err = tg3_setup_phy(tp, 0);
6973         if (err)
6974                 return err;
6975
6976         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6977             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6978                 u32 tmp;
6979
6980                 /* Clear CRC stats. */
6981                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6982                         tg3_writephy(tp, MII_TG3_TEST1,
6983                                      tmp | MII_TG3_TEST1_CRC_EN);
6984                         tg3_readphy(tp, 0x14, &tmp);
6985                 }
6986         }
6987
6988         __tg3_set_rx_mode(tp->dev);
6989
6990         /* Initialize receive rules. */
6991         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6992         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6993         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6994         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6995
6996         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6997             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6998                 limit = 8;
6999         else
7000                 limit = 16;
7001         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7002                 limit -= 4;
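             /* Fall through from the highest implemented rule down to
              * rule 4, clearing every unused rule/value pair.  Rules 0
              * and 1 were programmed above, rules 2 and 3 are left as-is,
              * and with ASF enabled the last four rules are left for the
              * firmware's use.
              */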
7003         switch (limit) {
7004         case 16:
7005                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7006         case 15:
7007                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7008         case 14:
7009                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7010         case 13:
7011                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7012         case 12:
7013                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7014         case 11:
7015                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7016         case 10:
7017                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7018         case 9:
7019                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7020         case 8:
7021                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7022         case 7:
7023                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7024         case 6:
7025                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7026         case 5:
7027                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7028         case 4:
7029                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7030         case 3:
7031                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7032         case 2:
7033         case 1:
7034
7035         default:
7036                 break;
7037         }
7038
7039         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7040                 /* Write our heartbeat update interval to APE. */
7041                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7042                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7043
7044         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7045
7046         return 0;
7047 }
7048
7049 /* Called at device open time to get the chip ready for
7050  * packet processing.  Invoked with tp->lock held.
7051  */
7052 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7053 {
7054         int err;
7055
7056         /* Force the chip into D0. */
7057         err = tg3_set_power_state(tp, PCI_D0);
7058         if (err)
7059                 goto out;
7060
7061         tg3_switch_clocks(tp);
7062
7063         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7064
7065         err = tg3_reset_hw(tp, reset_phy);
7066
7067 out:
7068         return err;
7069 }
7070
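     /* Read a 32-bit hardware statistics register and add it into the
      * driver's 64-bit accumulator; the unsigned wrap test on ->low
      * propagates any carry into ->high.
      */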
7071 #define TG3_STAT_ADD32(PSTAT, REG) \
7072 do {    u32 __val = tr32(REG); \
7073         (PSTAT)->low += __val; \
7074         if ((PSTAT)->low < __val) \
7075                 (PSTAT)->high += 1; \
7076 } while (0)
7077
7078 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7079 {
7080         struct tg3_hw_stats *sp = tp->hw_stats;
7081
7082         if (!netif_carrier_ok(tp->dev))
7083                 return;
7084
7085         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7086         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7087         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7088         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7089         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7090         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7091         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7092         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7093         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7094         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7095         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7096         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7097         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7098
7099         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7100         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7101         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7102         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7103         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7104         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7105         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7106         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7107         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7108         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7109         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7110         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7111         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7112         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7113
7114         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7115         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7116         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7117 }
7118
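     /* Driver watchdog timer.  With non-tagged status it also pokes the
      * chip to recover interrupts lost to the race described below.  Once
      * per second it fetches statistics (5705 and newer) and refreshes
      * link state where polling is required; every two seconds it sends
      * the ASF heartbeat.
      */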
7119 static void tg3_timer(unsigned long __opaque)
7120 {
7121         struct tg3 *tp = (struct tg3 *) __opaque;
7122
7123         if (tp->irq_sync)
7124                 goto restart_timer;
7125
7126         spin_lock(&tp->lock);
7127
7128         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7129                 /* All of this garbage is because, when using non-tagged
7130                  * IRQ status, the mailbox/status_block protocol the chip
7131                  * uses with the CPU is race prone.
7132                  */
7133                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7134                         tw32(GRC_LOCAL_CTRL,
7135                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7136                 } else {
7137                         tw32(HOSTCC_MODE, tp->coalesce_mode |
7138                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7139                 }
7140
7141                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7142                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7143                         spin_unlock(&tp->lock);
7144                         schedule_work(&tp->reset_task);
7145                         return;
7146                 }
7147         }
7148
7149         /* This part only runs once per second. */
7150         if (!--tp->timer_counter) {
7151                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7152                         tg3_periodic_fetch_stats(tp);
7153
7154                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7155                         u32 mac_stat;
7156                         int phy_event;
7157
7158                         mac_stat = tr32(MAC_STATUS);
7159
7160                         phy_event = 0;
7161                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7162                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7163                                         phy_event = 1;
7164                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7165                                 phy_event = 1;
7166
7167                         if (phy_event)
7168                                 tg3_setup_phy(tp, 0);
7169                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7170                         u32 mac_stat = tr32(MAC_STATUS);
7171                         int need_setup = 0;
7172
7173                         if (netif_carrier_ok(tp->dev) &&
7174                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7175                                 need_setup = 1;
7176                         }
7177                         if (! netif_carrier_ok(tp->dev) &&
7178                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
7179                                          MAC_STATUS_SIGNAL_DET))) {
7180                                 need_setup = 1;
7181                         }
7182                         if (need_setup) {
7183                                 if (!tp->serdes_counter) {
7184                                         tw32_f(MAC_MODE,
7185                                              (tp->mac_mode &
7186                                               ~MAC_MODE_PORT_MODE_MASK));
7187                                         udelay(40);
7188                                         tw32_f(MAC_MODE, tp->mac_mode);
7189                                         udelay(40);
7190                                 }
7191                                 tg3_setup_phy(tp, 0);
7192                         }
7193                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7194                         tg3_serdes_parallel_detect(tp);
7195
7196                 tp->timer_counter = tp->timer_multiplier;
7197         }
7198
7199         /* Heartbeat is only sent once every 2 seconds.
7200          *
7201          * The heartbeat is to tell the ASF firmware that the host
7202          * driver is still alive.  In the event that the OS crashes,
7203          * ASF needs to reset the hardware to free up the FIFO space
7204          * that may be filled with rx packets destined for the host.
7205          * If the FIFO is full, ASF will no longer function properly.
7206          *
7207          * Unintended resets have been reported on real-time kernels,
7208          * where the timer doesn't run on time.  Netpoll will also have
7209          * the same problem.
7210          *
7211          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7212          * to check the ring condition when the heartbeat is expiring
7213          * before doing the reset.  This will prevent most unintended
7214          * resets.
7215          */
7216         if (!--tp->asf_counter) {
7217                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7218                         u32 val;
7219
7220                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7221                                       FWCMD_NICDRV_ALIVE3);
7222                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7223                         /* 5 seconds timeout */
7224                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
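                             /* Kick the RX CPU event register so the
                              * firmware notices the new command; bit 14
                              * is presumably the driver-to-firmware
                              * event bit.
                              */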
7225                         val = tr32(GRC_RX_CPU_EVENT);
7226                         val |= (1 << 14);
7227                         tw32(GRC_RX_CPU_EVENT, val);
7228                 }
7229                 tp->asf_counter = tp->asf_multiplier;
7230         }
7231
7232         spin_unlock(&tp->lock);
7233
7234 restart_timer:
7235         tp->timer.expires = jiffies + tp->timer_offset;
7236         add_timer(&tp->timer);
7237 }
7238
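     /* Install the interrupt handler that matches the current interrupt
      * scheme: one-shot MSI, plain MSI, tagged-status INTx, or legacy
      * INTx.
      */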
7239 static int tg3_request_irq(struct tg3 *tp)
7240 {
7241         irq_handler_t fn;
7242         unsigned long flags;
7243         struct net_device *dev = tp->dev;
7244
7245         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7246                 fn = tg3_msi;
7247                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7248                         fn = tg3_msi_1shot;
7249                 flags = IRQF_SAMPLE_RANDOM;
7250         } else {
7251                 fn = tg3_interrupt;
7252                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7253                         fn = tg3_interrupt_tagged;
7254                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7255         }
7256         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7257 }
7258
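     /* Verify that the device can actually raise an interrupt: install a
      * test ISR, force the host coalescing engine to fire immediately,
      * and poll the interrupt mailbox (or the masked-PCI-interrupt bit)
      * for up to roughly 50 ms.
      */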
7259 static int tg3_test_interrupt(struct tg3 *tp)
7260 {
7261         struct net_device *dev = tp->dev;
7262         int err, i, intr_ok = 0;
7263
7264         if (!netif_running(dev))
7265                 return -ENODEV;
7266
7267         tg3_disable_ints(tp);
7268
7269         free_irq(tp->pdev->irq, dev);
7270
7271         err = request_irq(tp->pdev->irq, tg3_test_isr,
7272                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7273         if (err)
7274                 return err;
7275
7276         tp->hw_status->status &= ~SD_STATUS_UPDATED;
7277         tg3_enable_ints(tp);
7278
7279         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7280                HOSTCC_MODE_NOW);
7281
7282         for (i = 0; i < 5; i++) {
7283                 u32 int_mbox, misc_host_ctrl;
7284
7285                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7286                                         TG3_64BIT_REG_LOW);
7287                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7288
7289                 if ((int_mbox != 0) ||
7290                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7291                         intr_ok = 1;
7292                         break;
7293                 }
7294
7295                 msleep(10);
7296         }
7297
7298         tg3_disable_ints(tp);
7299
7300         free_irq(tp->pdev->irq, dev);
7301
7302         err = tg3_request_irq(tp);
7303
7304         if (err)
7305                 return err;
7306
7307         if (intr_ok)
7308                 return 0;
7309
7310         return -EIO;
7311 }
7312
7313 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
7314  * INTx mode is successfully restored.
7315  */
7316 static int tg3_test_msi(struct tg3 *tp)
7317 {
7318         struct net_device *dev = tp->dev;
7319         int err;
7320         u16 pci_cmd;
7321
7322         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7323                 return 0;
7324
7325         /* Turn off SERR reporting in case MSI terminates with Master
7326          * Abort.
7327          */
7328         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7329         pci_write_config_word(tp->pdev, PCI_COMMAND,
7330                               pci_cmd & ~PCI_COMMAND_SERR);
7331
7332         err = tg3_test_interrupt(tp);
7333
7334         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7335
7336         if (!err)
7337                 return 0;
7338
7339         /* other failures */
7340         if (err != -EIO)
7341                 return err;
7342
7343         /* MSI test failed, go back to INTx mode */
7344         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7345                "switching to INTx mode. Please report this failure to "
7346                "the PCI maintainer and include system chipset information.\n",
7347                        tp->dev->name);
7348
7349         free_irq(tp->pdev->irq, dev);
7350         pci_disable_msi(tp->pdev);
7351
7352         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7353
7354         err = tg3_request_irq(tp);
7355         if (err)
7356                 return err;
7357
7358         /* Need to reset the chip because the MSI cycle may have terminated
7359          * with Master Abort.
7360          */
7361         tg3_full_lock(tp, 1);
7362
7363         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7364         err = tg3_init_hw(tp, 1);
7365
7366         tg3_full_unlock(tp);
7367
7368         if (err)
7369                 free_irq(tp->pdev->irq, dev);
7370
7371         return err;
7372 }
7373
7374 static int tg3_open(struct net_device *dev)
7375 {
7376         struct tg3 *tp = netdev_priv(dev);
7377         int err;
7378
7379         netif_carrier_off(tp->dev);
7380
7381         tg3_full_lock(tp, 0);
7382
7383         err = tg3_set_power_state(tp, PCI_D0);
7384         if (err) {
7385                 tg3_full_unlock(tp);
7386                 return err;
7387         }
7388
7389         tg3_disable_ints(tp);
7390         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7391
7392         tg3_full_unlock(tp);
7393
7394         /* The placement of this call is tied
7395          * to the setup and use of Host TX descriptors.
7396          */
7397         err = tg3_alloc_consistent(tp);
7398         if (err)
7399                 return err;
7400
7401         if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7402                 /* All MSI supporting chips should support tagged
7403                  * status.  Assert that this is the case.
7404                  */
7405                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7406                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7407                                "Not using MSI.\n", tp->dev->name);
7408                 } else if (pci_enable_msi(tp->pdev) == 0) {
7409                         u32 msi_mode;
7410
7411                         msi_mode = tr32(MSGINT_MODE);
7412                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7413                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7414                 }
7415         }
7416         err = tg3_request_irq(tp);
7417
7418         if (err) {
7419                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7420                         pci_disable_msi(tp->pdev);
7421                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7422                 }
7423                 tg3_free_consistent(tp);
7424                 return err;
7425         }
7426
7427         napi_enable(&tp->napi);
7428
7429         tg3_full_lock(tp, 0);
7430
7431         err = tg3_init_hw(tp, 1);
7432         if (err) {
7433                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7434                 tg3_free_rings(tp);
7435         } else {
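                     /* With tagged status the status tags protect against
                      * missed interrupts, so the watchdog only needs to
                      * run once a second; otherwise run it at 10 Hz to
                      * catch the race worked around in tg3_timer.
                      */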
7436                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7437                         tp->timer_offset = HZ;
7438                 else
7439                         tp->timer_offset = HZ / 10;
7440
7441                 BUG_ON(tp->timer_offset > HZ);
7442                 tp->timer_counter = tp->timer_multiplier =
7443                         (HZ / tp->timer_offset);
7444                 tp->asf_counter = tp->asf_multiplier =
7445                         ((HZ / tp->timer_offset) * 2);
7446
7447                 init_timer(&tp->timer);
7448                 tp->timer.expires = jiffies + tp->timer_offset;
7449                 tp->timer.data = (unsigned long) tp;
7450                 tp->timer.function = tg3_timer;
7451         }
7452
7453         tg3_full_unlock(tp);
7454
7455         if (err) {
7456                 napi_disable(&tp->napi);
7457                 free_irq(tp->pdev->irq, dev);
7458                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7459                         pci_disable_msi(tp->pdev);
7460                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7461                 }
7462                 tg3_free_consistent(tp);
7463                 return err;
7464         }
7465
7466         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7467                 err = tg3_test_msi(tp);
7468
7469                 if (err) {
7470                         tg3_full_lock(tp, 0);
7471
7472                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7473                                 pci_disable_msi(tp->pdev);
7474                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7475                         }
7476                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7477                         tg3_free_rings(tp);
7478                         tg3_free_consistent(tp);
7479
7480                         tg3_full_unlock(tp);
7481
7482                         napi_disable(&tp->napi);
7483
7484                         return err;
7485                 }
7486
7487                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7488                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7489                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
7490
7491                                 tw32(PCIE_TRANSACTION_CFG,
7492                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
7493                         }
7494                 }
7495         }
7496
7497         tg3_full_lock(tp, 0);
7498
7499         add_timer(&tp->timer);
7500         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7501         tg3_enable_ints(tp);
7502
7503         tg3_full_unlock(tp);
7504
7505         netif_start_queue(dev);
7506
7507         return 0;
7508 }
7509
7510 #if 0
7511 /*static*/ void tg3_dump_state(struct tg3 *tp)
7512 {
7513         u32 val32, val32_2, val32_3, val32_4, val32_5;
7514         u16 val16;
7515         int i;
7516
7517         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7518         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7519         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7520                val16, val32);
7521
7522         /* MAC block */
7523         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7524                tr32(MAC_MODE), tr32(MAC_STATUS));
7525         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7526                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7527         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7528                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7529         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7530                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7531
7532         /* Send data initiator control block */
7533         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7534                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7535         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7536                tr32(SNDDATAI_STATSCTRL));
7537
7538         /* Send data completion control block */
7539         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7540
7541         /* Send BD ring selector block */
7542         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7543                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7544
7545         /* Send BD initiator control block */
7546         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7547                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7548
7549         /* Send BD completion control block */
7550         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7551
7552         /* Receive list placement control block */
7553         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7554                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7555         printk("       RCVLPC_STATSCTRL[%08x]\n",
7556                tr32(RCVLPC_STATSCTRL));
7557
7558         /* Receive data and receive BD initiator control block */
7559         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7560                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7561
7562         /* Receive data completion control block */
7563         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7564                tr32(RCVDCC_MODE));
7565
7566         /* Receive BD initiator control block */
7567         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7568                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7569
7570         /* Receive BD completion control block */
7571         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7572                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7573
7574         /* Receive list selector control block */
7575         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7576                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7577
7578         /* Mbuf cluster free block */
7579         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7580                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7581
7582         /* Host coalescing control block */
7583         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7584                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7585         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7586                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7587                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7588         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7589                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7590                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7591         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7592                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7593         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7594                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7595
7596         /* Memory arbiter control block */
7597         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7598                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7599
7600         /* Buffer manager control block */
7601         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7602                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7603         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7604                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7605         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7606                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7607                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7608                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7609
7610         /* Read DMA control block */
7611         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7612                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7613
7614         /* Write DMA control block */
7615         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7616                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7617
7618         /* DMA completion block */
7619         printk("DEBUG: DMAC_MODE[%08x]\n",
7620                tr32(DMAC_MODE));
7621
7622         /* GRC block */
7623         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7624                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7625         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7626                tr32(GRC_LOCAL_CTRL));
7627
7628         /* TG3_BDINFOs */
7629         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7630                tr32(RCVDBDI_JUMBO_BD + 0x0),
7631                tr32(RCVDBDI_JUMBO_BD + 0x4),
7632                tr32(RCVDBDI_JUMBO_BD + 0x8),
7633                tr32(RCVDBDI_JUMBO_BD + 0xc));
7634         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7635                tr32(RCVDBDI_STD_BD + 0x0),
7636                tr32(RCVDBDI_STD_BD + 0x4),
7637                tr32(RCVDBDI_STD_BD + 0x8),
7638                tr32(RCVDBDI_STD_BD + 0xc));
7639         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7640                tr32(RCVDBDI_MINI_BD + 0x0),
7641                tr32(RCVDBDI_MINI_BD + 0x4),
7642                tr32(RCVDBDI_MINI_BD + 0x8),
7643                tr32(RCVDBDI_MINI_BD + 0xc));
7644
7645         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7646         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7647         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7648         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7649         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7650                val32, val32_2, val32_3, val32_4);
7651
7652         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7653         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7654         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7655         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7656         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7657                val32, val32_2, val32_3, val32_4);
7658
7659         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7660         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7661         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7662         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7663         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7664         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7665                val32, val32_2, val32_3, val32_4, val32_5);
7666
7667         /* SW status block */
7668         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7669                tp->hw_status->status,
7670                tp->hw_status->status_tag,
7671                tp->hw_status->rx_jumbo_consumer,
7672                tp->hw_status->rx_consumer,
7673                tp->hw_status->rx_mini_consumer,
7674                tp->hw_status->idx[0].rx_producer,
7675                tp->hw_status->idx[0].tx_consumer);
7676
7677         /* SW statistics block */
7678         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7679                ((u32 *)tp->hw_stats)[0],
7680                ((u32 *)tp->hw_stats)[1],
7681                ((u32 *)tp->hw_stats)[2],
7682                ((u32 *)tp->hw_stats)[3]);
7683
7684         /* Mailboxes */
7685         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7686                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7687                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7688                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7689                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7690
7691         /* NIC side send descriptors. */
7692         for (i = 0; i < 6; i++) {
7693                 unsigned long txd;
7694
7695                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7696                         + (i * sizeof(struct tg3_tx_buffer_desc));
7697                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7698                        i,
7699                        readl(txd + 0x0), readl(txd + 0x4),
7700                        readl(txd + 0x8), readl(txd + 0xc));
7701         }
7702
7703         /* NIC side RX descriptors. */
7704         for (i = 0; i < 6; i++) {
7705                 unsigned long rxd;
7706
7707                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7708                         + (i * sizeof(struct tg3_rx_buffer_desc));
7709                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7710                        i,
7711                        readl(rxd + 0x0), readl(rxd + 0x4),
7712                        readl(rxd + 0x8), readl(rxd + 0xc));
7713                 rxd += (4 * sizeof(u32));
7714                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7715                        i,
7716                        readl(rxd + 0x0), readl(rxd + 0x4),
7717                        readl(rxd + 0x8), readl(rxd + 0xc));
7718         }
7719
7720         for (i = 0; i < 6; i++) {
7721                 unsigned long rxd;
7722
7723                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7724                         + (i * sizeof(struct tg3_rx_buffer_desc));
7725                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7726                        i,
7727                        readl(rxd + 0x0), readl(rxd + 0x4),
7728                        readl(rxd + 0x8), readl(rxd + 0xc));
7729                 rxd += (4 * sizeof(u32));
7730                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7731                        i,
7732                        readl(rxd + 0x0), readl(rxd + 0x4),
7733                        readl(rxd + 0x8), readl(rxd + 0xc));
7734         }
7735 }
7736 #endif
7737
7738 static struct net_device_stats *tg3_get_stats(struct net_device *);
7739 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7740
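/* Shutdown path: quiesce NAPI and the deferred reset worker, stop the
 * queue and the driver timer, halt the chip and free the rings under the
 * full lock, release the IRQ/MSI, snapshot the counters so they survive
 * the next open, then free DMA memory and drop the device to D3hot.
 */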
7741 static int tg3_close(struct net_device *dev)
7742 {
7743         struct tg3 *tp = netdev_priv(dev);
7744
7745         napi_disable(&tp->napi);
7746         cancel_work_sync(&tp->reset_task);
7747
7748         netif_stop_queue(dev);
7749
7750         del_timer_sync(&tp->timer);
7751
7752         tg3_full_lock(tp, 1);
7753 #if 0
7754         tg3_dump_state(tp);
7755 #endif
7756
7757         tg3_disable_ints(tp);
7758
7759         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7760         tg3_free_rings(tp);
7761         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7762
7763         tg3_full_unlock(tp);
7764
7765         free_irq(tp->pdev->irq, dev);
7766         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7767                 pci_disable_msi(tp->pdev);
7768                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7769         }
7770
7771         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7772                sizeof(tp->net_stats_prev));
7773         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7774                sizeof(tp->estats_prev));
7775
7776         tg3_free_consistent(tp);
7777
7778         tg3_set_power_state(tp, PCI_D3hot);
7779
7780         netif_carrier_off(tp->dev);
7781
7782         return 0;
7783 }
7784
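/* Hardware statistics counters are 64-bit high/low pairs; fold one into
 * an unsigned long, which on 32-bit hosts means keeping only the low word.
 */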
7785 static inline unsigned long get_stat64(tg3_stat64_t *val)
7786 {
7787         unsigned long ret;
7788
7789 #if (BITS_PER_LONG == 32)
7790         ret = val->low;
7791 #else
7792         ret = ((u64)val->high << 32) | ((u64)val->low);
7793 #endif
7794         return ret;
7795 }
7796
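/* 5700/5701 copper chips keep the CRC error count in the PHY rather than
 * in the MAC statistics block: enable the counter via MII_TG3_TEST1, read
 * it back from PHY register 0x14 (presumably clear-on-read, hence the
 * accumulation into phy_crc_errors) and return the running total.  All
 * other chips use the MAC's rx_fcs_errors statistic.
 */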
7797 static unsigned long calc_crc_errors(struct tg3 *tp)
7798 {
7799         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7800
7801         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7802             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7803              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7804                 u32 val;
7805
7806                 spin_lock_bh(&tp->lock);
7807                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
7808                         tg3_writephy(tp, MII_TG3_TEST1,
7809                                      val | MII_TG3_TEST1_CRC_EN);
7810                         tg3_readphy(tp, 0x14, &val);
7811                 } else
7812                         val = 0;
7813                 spin_unlock_bh(&tp->lock);
7814
7815                 tp->phy_crc_errors += val;
7816
7817                 return tp->phy_crc_errors;
7818         }
7819
7820         return get_stat64(&hw_stats->rx_fcs_errors);
7821 }
7822
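/* Each ethtool counter is the snapshot taken at the last close plus the
 * live hardware counter, e.g. ESTAT_ADD(rx_octets) expands to
 *
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_stat64(&hw_stats->rx_octets);
 */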
7823 #define ESTAT_ADD(member) \
7824         estats->member =        old_estats->member + \
7825                                 get_stat64(&hw_stats->member)
7826
7827 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7828 {
7829         struct tg3_ethtool_stats *estats = &tp->estats;
7830         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7831         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7832
7833         if (!hw_stats)
7834                 return old_estats;
7835
7836         ESTAT_ADD(rx_octets);
7837         ESTAT_ADD(rx_fragments);
7838         ESTAT_ADD(rx_ucast_packets);
7839         ESTAT_ADD(rx_mcast_packets);
7840         ESTAT_ADD(rx_bcast_packets);
7841         ESTAT_ADD(rx_fcs_errors);
7842         ESTAT_ADD(rx_align_errors);
7843         ESTAT_ADD(rx_xon_pause_rcvd);
7844         ESTAT_ADD(rx_xoff_pause_rcvd);
7845         ESTAT_ADD(rx_mac_ctrl_rcvd);
7846         ESTAT_ADD(rx_xoff_entered);
7847         ESTAT_ADD(rx_frame_too_long_errors);
7848         ESTAT_ADD(rx_jabbers);
7849         ESTAT_ADD(rx_undersize_packets);
7850         ESTAT_ADD(rx_in_length_errors);
7851         ESTAT_ADD(rx_out_length_errors);
7852         ESTAT_ADD(rx_64_or_less_octet_packets);
7853         ESTAT_ADD(rx_65_to_127_octet_packets);
7854         ESTAT_ADD(rx_128_to_255_octet_packets);
7855         ESTAT_ADD(rx_256_to_511_octet_packets);
7856         ESTAT_ADD(rx_512_to_1023_octet_packets);
7857         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7858         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7859         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7860         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7861         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7862
7863         ESTAT_ADD(tx_octets);
7864         ESTAT_ADD(tx_collisions);
7865         ESTAT_ADD(tx_xon_sent);
7866         ESTAT_ADD(tx_xoff_sent);
7867         ESTAT_ADD(tx_flow_control);
7868         ESTAT_ADD(tx_mac_errors);
7869         ESTAT_ADD(tx_single_collisions);
7870         ESTAT_ADD(tx_mult_collisions);
7871         ESTAT_ADD(tx_deferred);
7872         ESTAT_ADD(tx_excessive_collisions);
7873         ESTAT_ADD(tx_late_collisions);
7874         ESTAT_ADD(tx_collide_2times);
7875         ESTAT_ADD(tx_collide_3times);
7876         ESTAT_ADD(tx_collide_4times);
7877         ESTAT_ADD(tx_collide_5times);
7878         ESTAT_ADD(tx_collide_6times);
7879         ESTAT_ADD(tx_collide_7times);
7880         ESTAT_ADD(tx_collide_8times);
7881         ESTAT_ADD(tx_collide_9times);
7882         ESTAT_ADD(tx_collide_10times);
7883         ESTAT_ADD(tx_collide_11times);
7884         ESTAT_ADD(tx_collide_12times);
7885         ESTAT_ADD(tx_collide_13times);
7886         ESTAT_ADD(tx_collide_14times);
7887         ESTAT_ADD(tx_collide_15times);
7888         ESTAT_ADD(tx_ucast_packets);
7889         ESTAT_ADD(tx_mcast_packets);
7890         ESTAT_ADD(tx_bcast_packets);
7891         ESTAT_ADD(tx_carrier_sense_errors);
7892         ESTAT_ADD(tx_discards);
7893         ESTAT_ADD(tx_errors);
7894
7895         ESTAT_ADD(dma_writeq_full);
7896         ESTAT_ADD(dma_write_prioq_full);
7897         ESTAT_ADD(rxbds_empty);
7898         ESTAT_ADD(rx_discards);
7899         ESTAT_ADD(rx_errors);
7900         ESTAT_ADD(rx_threshold_hit);
7901
7902         ESTAT_ADD(dma_readq_full);
7903         ESTAT_ADD(dma_read_prioq_full);
7904         ESTAT_ADD(tx_comp_queue_full);
7905
7906         ESTAT_ADD(ring_set_send_prod_index);
7907         ESTAT_ADD(ring_status_update);
7908         ESTAT_ADD(nic_irqs);
7909         ESTAT_ADD(nic_avoided_irqs);
7910         ESTAT_ADD(nic_tx_threshold_hit);
7911
7912         return estats;
7913 }
7914
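/* Same accumulate-over-snapshot scheme as tg3_get_estats(): fold the
 * net_stats_prev values saved in tg3_close() into the live MAC counters,
 * combining several hardware counters per netdev stats field.
 */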
7915 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7916 {
7917         struct tg3 *tp = netdev_priv(dev);
7918         struct net_device_stats *stats = &tp->net_stats;
7919         struct net_device_stats *old_stats = &tp->net_stats_prev;
7920         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7921
7922         if (!hw_stats)
7923                 return old_stats;
7924
7925         stats->rx_packets = old_stats->rx_packets +
7926                 get_stat64(&hw_stats->rx_ucast_packets) +
7927                 get_stat64(&hw_stats->rx_mcast_packets) +
7928                 get_stat64(&hw_stats->rx_bcast_packets);
7929
7930         stats->tx_packets = old_stats->tx_packets +
7931                 get_stat64(&hw_stats->tx_ucast_packets) +
7932                 get_stat64(&hw_stats->tx_mcast_packets) +
7933                 get_stat64(&hw_stats->tx_bcast_packets);
7934
7935         stats->rx_bytes = old_stats->rx_bytes +
7936                 get_stat64(&hw_stats->rx_octets);
7937         stats->tx_bytes = old_stats->tx_bytes +
7938                 get_stat64(&hw_stats->tx_octets);
7939
7940         stats->rx_errors = old_stats->rx_errors +
7941                 get_stat64(&hw_stats->rx_errors);
7942         stats->tx_errors = old_stats->tx_errors +
7943                 get_stat64(&hw_stats->tx_errors) +
7944                 get_stat64(&hw_stats->tx_mac_errors) +
7945                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7946                 get_stat64(&hw_stats->tx_discards);
7947
7948         stats->multicast = old_stats->multicast +
7949                 get_stat64(&hw_stats->rx_mcast_packets);
7950         stats->collisions = old_stats->collisions +
7951                 get_stat64(&hw_stats->tx_collisions);
7952
7953         stats->rx_length_errors = old_stats->rx_length_errors +
7954                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7955                 get_stat64(&hw_stats->rx_undersize_packets);
7956
7957         stats->rx_over_errors = old_stats->rx_over_errors +
7958                 get_stat64(&hw_stats->rxbds_empty);
7959         stats->rx_frame_errors = old_stats->rx_frame_errors +
7960                 get_stat64(&hw_stats->rx_align_errors);
7961         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7962                 get_stat64(&hw_stats->tx_discards);
7963         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7964                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7965
7966         stats->rx_crc_errors = old_stats->rx_crc_errors +
7967                 calc_crc_errors(tp);
7968
7969         stats->rx_missed_errors = old_stats->rx_missed_errors +
7970                 get_stat64(&hw_stats->rx_discards);
7971
7972         return stats;
7973 }
7974
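/* Bit-reflected CRC-32 over the buffer (polynomial 0xedb88320), seeded
 * with all ones and inverted on return; used below to derive the
 * multicast hash filter bits.
 */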
7975 static inline u32 calc_crc(unsigned char *buf, int len)
7976 {
7977         u32 reg;
7978         u32 tmp;
7979         int j, k;
7980
7981         reg = 0xffffffff;
7982
7983         for (j = 0; j < len; j++) {
7984                 reg ^= buf[j];
7985
7986                 for (k = 0; k < 8; k++) {
7987                         tmp = reg & 0x01;
7988
7989                         reg >>= 1;
7990
7991                         if (tmp) {
7992                                 reg ^= 0xedb88320;
7993                         }
7994                 }
7995         }
7996
7997         return ~reg;
7998 }
7999
8000 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8001 {
8002         /* accept or reject all multicast frames */
8003         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8004         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8005         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8006         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8007 }
8008
8009 static void __tg3_set_rx_mode(struct net_device *dev)
8010 {
8011         struct tg3 *tp = netdev_priv(dev);
8012         u32 rx_mode;
8013
8014         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8015                                   RX_MODE_KEEP_VLAN_TAG);
8016
8017         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8018          * flag clear.
8019          */
8020 #if TG3_VLAN_TAG_USED
8021         if (!tp->vlgrp &&
8022             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8023                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8024 #else
8025         /* By definition, VLAN is disabled always in this
8026          * case.
8027          */
8028         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8029                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8030 #endif
8031
8032         if (dev->flags & IFF_PROMISC) {
8033                 /* Promiscuous mode. */
8034                 rx_mode |= RX_MODE_PROMISC;
8035         } else if (dev->flags & IFF_ALLMULTI) {
8036                 /* Accept all multicast. */
8037                 tg3_set_multi (tp, 1);
8038         } else if (dev->mc_count < 1) {
8039                 /* Reject all multicast. */
8040                 tg3_set_multi (tp, 0);
8041         } else {
8042                 /* Accept one or more multicast(s). */
8043                 struct dev_mc_list *mclist;
8044                 unsigned int i;
8045                 u32 mc_filter[4] = { 0, };
8046                 u32 regidx;
8047                 u32 bit;
8048                 u32 crc;
8049
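                /* Hash each address with calc_crc() and use the low 7
                 * bits of the inverted CRC to index the 128-bit hash
                 * filter: bits 6:5 select one of MAC_HASH_REG_0..3 and
                 * bits 4:0 select the bit within that register.
                 */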
8050                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8051                      i++, mclist = mclist->next) {
8052
8053                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8054                         bit = ~crc & 0x7f;
8055                         regidx = (bit & 0x60) >> 5;
8056                         bit &= 0x1f;
8057                         mc_filter[regidx] |= (1 << bit);
8058                 }
8059
8060                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8061                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8062                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8063                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8064         }
8065
8066         if (rx_mode != tp->rx_mode) {
8067                 tp->rx_mode = rx_mode;
8068                 tw32_f(MAC_RX_MODE, rx_mode);
8069                 udelay(10);
8070         }
8071 }
8072
8073 static void tg3_set_rx_mode(struct net_device *dev)
8074 {
8075         struct tg3 *tp = netdev_priv(dev);
8076
8077         if (!netif_running(dev))
8078                 return;
8079
8080         tg3_full_lock(tp, 0);
8081         __tg3_set_rx_mode(dev);
8082         tg3_full_unlock(tp);
8083 }
8084
8085 #define TG3_REGDUMP_LEN         (32 * 1024)
8086
8087 static int tg3_get_regs_len(struct net_device *dev)
8088 {
8089         return TG3_REGDUMP_LEN;
8090 }
8091
8092 static void tg3_get_regs(struct net_device *dev,
8093                 struct ethtool_regs *regs, void *_p)
8094 {
8095         u32 *p = _p;
8096         struct tg3 *tp = netdev_priv(dev);
8097         u8 *orig_p = _p;
8098         int i;
8099
8100         regs->version = 0;
8101
8102         memset(p, 0, TG3_REGDUMP_LEN);
8103
8104         if (tp->link_config.phy_is_low_power)
8105                 return;
8106
8107         tg3_full_lock(tp, 0);
8108
8109 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
8110 #define GET_REG32_LOOP(base,len)                \
8111 do {    p = (u32 *)(orig_p + (base));           \
8112         for (i = 0; i < len; i += 4)            \
8113                 __GET_REG32((base) + i);        \
8114 } while (0)
8115 #define GET_REG32_1(reg)                        \
8116 do {    p = (u32 *)(orig_p + (reg));            \
8117         __GET_REG32((reg));                     \
8118 } while (0)
8119
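        /* The dump buffer mirrors the register address space: each loop
         * repositions the write pointer at (buffer + register offset)
         * before copying, so every register's value lands at the same
         * offset in the 32 KB dump as it has in BAR0.  Gaps stay zeroed
         * from the memset above.
         */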
8120         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8121         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8122         GET_REG32_LOOP(MAC_MODE, 0x4f0);
8123         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8124         GET_REG32_1(SNDDATAC_MODE);
8125         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8126         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8127         GET_REG32_1(SNDBDC_MODE);
8128         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8129         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8130         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8131         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8132         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8133         GET_REG32_1(RCVDCC_MODE);
8134         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8135         GET_REG32_LOOP(RCVCC_MODE, 0x14);
8136         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8137         GET_REG32_1(MBFREE_MODE);
8138         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8139         GET_REG32_LOOP(MEMARB_MODE, 0x10);
8140         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8141         GET_REG32_LOOP(RDMAC_MODE, 0x08);
8142         GET_REG32_LOOP(WDMAC_MODE, 0x08);
8143         GET_REG32_1(RX_CPU_MODE);
8144         GET_REG32_1(RX_CPU_STATE);
8145         GET_REG32_1(RX_CPU_PGMCTR);
8146         GET_REG32_1(RX_CPU_HWBKPT);
8147         GET_REG32_1(TX_CPU_MODE);
8148         GET_REG32_1(TX_CPU_STATE);
8149         GET_REG32_1(TX_CPU_PGMCTR);
8150         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8151         GET_REG32_LOOP(FTQ_RESET, 0x120);
8152         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8153         GET_REG32_1(DMAC_MODE);
8154         GET_REG32_LOOP(GRC_MODE, 0x4c);
8155         if (tp->tg3_flags & TG3_FLAG_NVRAM)
8156                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8157
8158 #undef __GET_REG32
8159 #undef GET_REG32_LOOP
8160 #undef GET_REG32_1
8161
8162         tg3_full_unlock(tp);
8163 }
8164
8165 static int tg3_get_eeprom_len(struct net_device *dev)
8166 {
8167         struct tg3 *tp = netdev_priv(dev);
8168
8169         return tp->nvram_size;
8170 }
8171
8172 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8173 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8174
8175 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8176 {
8177         struct tg3 *tp = netdev_priv(dev);
8178         int ret;
8179         u8  *pd;
8180         u32 i, offset, len, val, b_offset, b_count;
8181
8182         if (tp->link_config.phy_is_low_power)
8183                 return -EAGAIN;
8184
8185         offset = eeprom->offset;
8186         len = eeprom->len;
8187         eeprom->len = 0;
8188
8189         eeprom->magic = TG3_EEPROM_MAGIC;
8190
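        /* NVRAM can only be read as aligned 32-bit words, so split the
         * request into an unaligned head, a whole-word body and an
         * unaligned tail; each word is converted to little-endian byte
         * order before the requested bytes are copied out.
         */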
8191         if (offset & 3) {
8192                 /* adjustments to start on required 4 byte boundary */
8193                 b_offset = offset & 3;
8194                 b_count = 4 - b_offset;
8195                 if (b_count > len) {
8196                         /* i.e. offset=1 len=2 */
8197                         b_count = len;
8198                 }
8199                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
8200                 if (ret)
8201                         return ret;
8202                 val = cpu_to_le32(val);
8203                 memcpy(data, ((char*)&val) + b_offset, b_count);
8204                 len -= b_count;
8205                 offset += b_count;
8206                 eeprom->len += b_count;
8207         }
8208
8209         /* read bytes upto the last 4 byte boundary */
8210         pd = &data[eeprom->len];
8211         for (i = 0; i < (len - (len & 3)); i += 4) {
8212                 ret = tg3_nvram_read(tp, offset + i, &val);
8213                 if (ret) {
8214                         eeprom->len += i;
8215                         return ret;
8216                 }
8217                 val = cpu_to_le32(val);
8218                 memcpy(pd + i, &val, 4);
8219         }
8220         eeprom->len += i;
8221
8222         if (len & 3) {
8223                 /* read last bytes not ending on 4 byte boundary */
8224                 pd = &data[eeprom->len];
8225                 b_count = len & 3;
8226                 b_offset = offset + len - b_count;
8227                 ret = tg3_nvram_read(tp, b_offset, &val);
8228                 if (ret)
8229                         return ret;
8230                 val = cpu_to_le32(val);
8231                 memcpy(pd, ((char*)&val), b_count);
8232                 eeprom->len += b_count;
8233         }
8234         return 0;
8235 }
8236
8237 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8238
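/* Writes follow the same word-granularity rule as reads: if the caller's
 * range starts or ends off a 4-byte boundary, the bordering NVRAM words
 * are read first and merged with the caller's data in a temporary buffer
 * so that tg3_nvram_write_block() always sees an aligned, whole-word
 * region.
 */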
8239 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8240 {
8241         struct tg3 *tp = netdev_priv(dev);
8242         int ret;
8243         u32 offset, len, b_offset, odd_len, start, end;
8244         u8 *buf;
8245
8246         if (tp->link_config.phy_is_low_power)
8247                 return -EAGAIN;
8248
8249         if (eeprom->magic != TG3_EEPROM_MAGIC)
8250                 return -EINVAL;
8251
8252         offset = eeprom->offset;
8253         len = eeprom->len;
8254
8255         if ((b_offset = (offset & 3))) {
8256                 /* adjustments to start on required 4 byte boundary */
8257                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
8258                 if (ret)
8259                         return ret;
8260                 start = cpu_to_le32(start);
8261                 len += b_offset;
8262                 offset &= ~3;
8263                 if (len < 4)
8264                         len = 4;
8265         }
8266
8267         odd_len = 0;
8268         if (len & 3) {
8269                 /* adjustments to end on required 4 byte boundary */
8270                 odd_len = 1;
8271                 len = (len + 3) & ~3;
8272                 ret = tg3_nvram_read(tp, offset+len-4, &end);
8273                 if (ret)
8274                         return ret;
8275                 end = cpu_to_le32(end);
8276         }
8277
8278         buf = data;
8279         if (b_offset || odd_len) {
8280                 buf = kmalloc(len, GFP_KERNEL);
8281                 if (!buf)
8282                         return -ENOMEM;
8283                 if (b_offset)
8284                         memcpy(buf, &start, 4);
8285                 if (odd_len)
8286                         memcpy(buf+len-4, &end, 4);
8287                 memcpy(buf + b_offset, data, eeprom->len);
8288         }
8289
8290         ret = tg3_nvram_write_block(tp, offset, len, buf);
8291
8292         if (buf != data)
8293                 kfree(buf);
8294
8295         return ret;
8296 }
8297
8298 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8299 {
8300         struct tg3 *tp = netdev_priv(dev);
8301
8302         cmd->supported = (SUPPORTED_Autoneg);
8303
8304         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8305                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8306                                    SUPPORTED_1000baseT_Full);
8307
8308         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8309                 cmd->supported |= (SUPPORTED_100baseT_Half |
8310                                   SUPPORTED_100baseT_Full |
8311                                   SUPPORTED_10baseT_Half |
8312                                   SUPPORTED_10baseT_Full |
8313                                   SUPPORTED_MII);
8314                 cmd->port = PORT_TP;
8315         } else {
8316                 cmd->supported |= SUPPORTED_FIBRE;
8317                 cmd->port = PORT_FIBRE;
8318         }
8319
8320         cmd->advertising = tp->link_config.advertising;
8321         if (netif_running(dev)) {
8322                 cmd->speed = tp->link_config.active_speed;
8323                 cmd->duplex = tp->link_config.active_duplex;
8324         }
8325         cmd->phy_address = PHY_ADDR;
8326         cmd->transceiver = 0;
8327         cmd->autoneg = tp->link_config.autoneg;
8328         cmd->maxtxpkt = 0;
8329         cmd->maxrxpkt = 0;
8330         return 0;
8331 }
8332
8333 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8334 {
8335         struct tg3 *tp = netdev_priv(dev);
8336
8337         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8338                 /* These are the only valid advertisement bits allowed.  */
8339                 if (cmd->autoneg == AUTONEG_ENABLE &&
8340                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8341                                           ADVERTISED_1000baseT_Full |
8342                                           ADVERTISED_Autoneg |
8343                                           ADVERTISED_FIBRE)))
8344                         return -EINVAL;
8345                 /* Fiber can only do SPEED_1000.  */
8346                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8347                          (cmd->speed != SPEED_1000))
8348                         return -EINVAL;
8349         /* Copper cannot force SPEED_1000.  */
8350         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8351                    (cmd->speed == SPEED_1000))
8352                 return -EINVAL;
8353         else if ((cmd->speed == SPEED_1000) &&
8354                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8355                 return -EINVAL;
8356
8357         tg3_full_lock(tp, 0);
8358
8359         tp->link_config.autoneg = cmd->autoneg;
8360         if (cmd->autoneg == AUTONEG_ENABLE) {
8361                 tp->link_config.advertising = (cmd->advertising |
8362                                               ADVERTISED_Autoneg);
8363                 tp->link_config.speed = SPEED_INVALID;
8364                 tp->link_config.duplex = DUPLEX_INVALID;
8365         } else {
8366                 tp->link_config.advertising = 0;
8367                 tp->link_config.speed = cmd->speed;
8368                 tp->link_config.duplex = cmd->duplex;
8369         }
8370
8371         tp->link_config.orig_speed = tp->link_config.speed;
8372         tp->link_config.orig_duplex = tp->link_config.duplex;
8373         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8374
8375         if (netif_running(dev))
8376                 tg3_setup_phy(tp, 1);
8377
8378         tg3_full_unlock(tp);
8379
8380         return 0;
8381 }
8382
8383 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8384 {
8385         struct tg3 *tp = netdev_priv(dev);
8386
8387         strcpy(info->driver, DRV_MODULE_NAME);
8388         strcpy(info->version, DRV_MODULE_VERSION);
8389         strcpy(info->fw_version, tp->fw_ver);
8390         strcpy(info->bus_info, pci_name(tp->pdev));
8391 }
8392
8393 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8394 {
8395         struct tg3 *tp = netdev_priv(dev);
8396
8397         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8398                 wol->supported = WAKE_MAGIC;
8399         else
8400                 wol->supported = 0;
8401         wol->wolopts = 0;
8402         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8403                 wol->wolopts = WAKE_MAGIC;
8404         memset(&wol->sopass, 0, sizeof(wol->sopass));
8405 }
8406
8407 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8408 {
8409         struct tg3 *tp = netdev_priv(dev);
8410
8411         if (wol->wolopts & ~WAKE_MAGIC)
8412                 return -EINVAL;
8413         if ((wol->wolopts & WAKE_MAGIC) &&
8414             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8415                 return -EINVAL;
8416
8417         spin_lock_bh(&tp->lock);
8418         if (wol->wolopts & WAKE_MAGIC)
8419                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8420         else
8421                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8422         spin_unlock_bh(&tp->lock);
8423
8424         return 0;
8425 }
8426
8427 static u32 tg3_get_msglevel(struct net_device *dev)
8428 {
8429         struct tg3 *tp = netdev_priv(dev);
8430         return tp->msg_enable;
8431 }
8432
8433 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8434 {
8435         struct tg3 *tp = netdev_priv(dev);
8436         tp->msg_enable = value;
8437 }
8438
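/* TSO can only be toggled on chips that support it.  On HW_TSO_2 parts
 * other than the 5906, IPv6 TSO (NETIF_F_TSO6) is toggled along with it,
 * and the 5761 additionally toggles NETIF_F_TSO_ECN.
 */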
8439 static int tg3_set_tso(struct net_device *dev, u32 value)
8440 {
8441         struct tg3 *tp = netdev_priv(dev);
8442
8443         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8444                 if (value)
8445                         return -EINVAL;
8446                 return 0;
8447         }
8448         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8449             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8450                 if (value) {
8451                         dev->features |= NETIF_F_TSO6;
8452                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8453                                 dev->features |= NETIF_F_TSO_ECN;
8454                 } else
8455                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8456         }
8457         return ethtool_op_set_tso(dev, value);
8458 }
8459
8460 static int tg3_nway_reset(struct net_device *dev)
8461 {
8462         struct tg3 *tp = netdev_priv(dev);
8463         u32 bmcr;
8464         int r;
8465
8466         if (!netif_running(dev))
8467                 return -EAGAIN;
8468
8469         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8470                 return -EINVAL;
8471
8472         spin_lock_bh(&tp->lock);
8473         r = -EINVAL;
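        /* The first BMCR read is discarded; it appears to act as a dummy
         * read so the second one returns fresh, unlatched state.
         */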
8474         tg3_readphy(tp, MII_BMCR, &bmcr);
8475         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8476             ((bmcr & BMCR_ANENABLE) ||
8477              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8478                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8479                                            BMCR_ANENABLE);
8480                 r = 0;
8481         }
8482         spin_unlock_bh(&tp->lock);
8483
8484         return r;
8485 }
8486
8487 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8488 {
8489         struct tg3 *tp = netdev_priv(dev);
8490
8491         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8492         ering->rx_mini_max_pending = 0;
8493         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8494                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8495         else
8496                 ering->rx_jumbo_max_pending = 0;
8497
8498         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8499
8500         ering->rx_pending = tp->rx_pending;
8501         ering->rx_mini_pending = 0;
8502         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8503                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8504         else
8505                 ering->rx_jumbo_pending = 0;
8506
8507         ering->tx_pending = tp->tx_pending;
8508 }
8509
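/* Reject ring sizes beyond the fixed hardware ring sizes, and TX rings
 * too small to hold one maximally fragmented skb (three of them on chips
 * carrying the TSO bug workaround).
 */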
8510 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8511 {
8512         struct tg3 *tp = netdev_priv(dev);
8513         int irq_sync = 0, err = 0;
8514
8515         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8516             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8517             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8518             (ering->tx_pending <= MAX_SKB_FRAGS) ||
8519             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8520              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8521                 return -EINVAL;
8522
8523         if (netif_running(dev)) {
8524                 tg3_netif_stop(tp);
8525                 irq_sync = 1;
8526         }
8527
8528         tg3_full_lock(tp, irq_sync);
8529
8530         tp->rx_pending = ering->rx_pending;
8531
8532         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8533             tp->rx_pending > 63)
8534                 tp->rx_pending = 63;
8535         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8536         tp->tx_pending = ering->tx_pending;
8537
8538         if (netif_running(dev)) {
8539                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8540                 err = tg3_restart_hw(tp, 1);
8541                 if (!err)
8542                         tg3_netif_start(tp);
8543         }
8544
8545         tg3_full_unlock(tp);
8546
8547         return err;
8548 }
8549
8550 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8551 {
8552         struct tg3 *tp = netdev_priv(dev);
8553
8554         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8555         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8556         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8557 }
8558
8559 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8560 {
8561         struct tg3 *tp = netdev_priv(dev);
8562         int irq_sync = 0, err = 0;
8563
8564         if (netif_running(dev)) {
8565                 tg3_netif_stop(tp);
8566                 irq_sync = 1;
8567         }
8568
8569         tg3_full_lock(tp, irq_sync);
8570
8571         if (epause->autoneg)
8572                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8573         else
8574                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8575         if (epause->rx_pause)
8576                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8577         else
8578                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8579         if (epause->tx_pause)
8580                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8581         else
8582                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8583
8584         if (netif_running(dev)) {
8585                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8586                 err = tg3_restart_hw(tp, 1);
8587                 if (!err)
8588                         tg3_netif_start(tp);
8589         }
8590
8591         tg3_full_unlock(tp);
8592
8593         return err;
8594 }
8595
8596 static u32 tg3_get_rx_csum(struct net_device *dev)
8597 {
8598         struct tg3 *tp = netdev_priv(dev);
8599         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8600 }
8601
8602 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8603 {
8604         struct tg3 *tp = netdev_priv(dev);
8605
8606         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8607                 if (data != 0)
8608                         return -EINVAL;
8609                 return 0;
8610         }
8611
8612         spin_lock_bh(&tp->lock);
8613         if (data)
8614                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8615         else
8616                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8617         spin_unlock_bh(&tp->lock);
8618
8619         return 0;
8620 }
8621
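/* 5755/5787/5784/5761-class chips can also offload IPv6 checksums, so
 * the IPv6-aware ethtool op is used there; otherwise only the IPv4
 * checksum offload flag is toggled.
 */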
8622 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8623 {
8624         struct tg3 *tp = netdev_priv(dev);
8625
8626         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8627                 if (data != 0)
8628                         return -EINVAL;
8629                 return 0;
8630         }
8631
8632         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8633             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8634             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8635             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8636                 ethtool_op_set_tx_ipv6_csum(dev, data);
8637         else
8638                 ethtool_op_set_tx_csum(dev, data);
8639
8640         return 0;
8641 }
8642
8643 static int tg3_get_sset_count (struct net_device *dev, int sset)
8644 {
8645         switch (sset) {
8646         case ETH_SS_TEST:
8647                 return TG3_NUM_TEST;
8648         case ETH_SS_STATS:
8649                 return TG3_NUM_STATS;
8650         default:
8651                 return -EOPNOTSUPP;
8652         }
8653 }
8654
8655 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8656 {
8657         switch (stringset) {
8658         case ETH_SS_STATS:
8659                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8660                 break;
8661         case ETH_SS_TEST:
8662                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8663                 break;
8664         default:
8665                 WARN_ON(1);     /* we need a WARN() */
8666                 break;
8667         }
8668 }
8669
8670 static int tg3_phys_id(struct net_device *dev, u32 data)
8671 {
8672         struct tg3 *tp = netdev_priv(dev);
8673         int i;
8674
8675         if (!netif_running(tp->dev))
8676                 return -EAGAIN;
8677
8678         if (data == 0)
8679                 data = 2;
8680
8681         for (i = 0; i < (data * 2); i++) {
8682                 if ((i % 2) == 0)
8683                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8684                                            LED_CTRL_1000MBPS_ON |
8685                                            LED_CTRL_100MBPS_ON |
8686                                            LED_CTRL_10MBPS_ON |
8687                                            LED_CTRL_TRAFFIC_OVERRIDE |
8688                                            LED_CTRL_TRAFFIC_BLINK |
8689                                            LED_CTRL_TRAFFIC_LED);
8690
8691                 else
8692                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8693                                            LED_CTRL_TRAFFIC_OVERRIDE);
8694
8695                 if (msleep_interruptible(500))
8696                         break;
8697         }
8698         tw32(MAC_LED_CTRL, tp->led_ctrl);
8699         return 0;
8700 }
8701
8702 static void tg3_get_ethtool_stats (struct net_device *dev,
8703                                    struct ethtool_stats *estats, u64 *tmp_stats)
8704 {
8705         struct tg3 *tp = netdev_priv(dev);
8706         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8707 }
8708
8709 #define NVRAM_TEST_SIZE 0x100
8710 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
8711 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
8712 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
8713 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8714 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8715
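/* NVRAM self-test: identify the image by its magic word, read the
 * expected number of words, then verify whichever integrity scheme that
 * format uses - a simple byte-sum checksum for self-boot firmware images
 * (revision 2 skips the MBA word), per-word parity bits for the hardware
 * self-boot format, or the two CRC-32 checksums (bootstrap header at
 * offset 0x10, manufacturing block at 0xfc) for a full legacy image.
 */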
8716 static int tg3_test_nvram(struct tg3 *tp)
8717 {
8718         u32 *buf, csum, magic;
8719         int i, j, k, err = 0, size;
8720
8721         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8722                 return -EIO;
8723
8724         if (magic == TG3_EEPROM_MAGIC)
8725                 size = NVRAM_TEST_SIZE;
8726         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
8727                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
8728                     TG3_EEPROM_SB_FORMAT_1) {
8729                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
8730                         case TG3_EEPROM_SB_REVISION_0:
8731                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
8732                                 break;
8733                         case TG3_EEPROM_SB_REVISION_2:
8734                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
8735                                 break;
8736                         case TG3_EEPROM_SB_REVISION_3:
8737                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
8738                                 break;
8739                         default:
8740                                 return 0;
8741                         }
8742                 } else
8743                         return 0;
8744         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
8745                 size = NVRAM_SELFBOOT_HW_SIZE;
8746         else
8747                 return -EIO;
8748
8749         buf = kmalloc(size, GFP_KERNEL);
8750         if (buf == NULL)
8751                 return -ENOMEM;
8752
8753         err = -EIO;
8754         for (i = 0, j = 0; i < size; i += 4, j++) {
8755                 u32 val;
8756
8757                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8758                         break;
8759                 buf[j] = cpu_to_le32(val);
8760         }
8761         if (i < size)
8762                 goto out;
8763
8764         /* Selfboot format */
8765         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
8766             TG3_EEPROM_MAGIC_FW) {
8767                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8768
8769                 if ((cpu_to_be32(buf[0]) & TG3_EEPROM_SB_REVISION_MASK) ==
8770                     TG3_EEPROM_SB_REVISION_2) {
8771                         /* For rev 2, the csum doesn't include the MBA. */
8772                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
8773                                 csum8 += buf8[i];
8774                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
8775                                 csum8 += buf8[i];
8776                 } else {
8777                         for (i = 0; i < size; i++)
8778                                 csum8 += buf8[i];
8779                 }
8780
8781                 if (csum8 == 0) {
8782                         err = 0;
8783                         goto out;
8784                 }
8785
8786                 err = -EIO;
8787                 goto out;
8788         }
8789
8790         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
8791             TG3_EEPROM_MAGIC_HW) {
8792                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
8793                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
8794                 u8 *buf8 = (u8 *) buf;
8795
8796                 /* Separate the parity bits and the data bytes.  */
8797                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
8798                         if ((i == 0) || (i == 8)) {
8799                                 int l;
8800                                 u8 msk;
8801
8802                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
8803                                         parity[k++] = buf8[i] & msk;
8804                                 i++;
8805                         }
8806                         else if (i == 16) {
8807                                 int l;
8808                                 u8 msk;
8809
8810                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
8811                                         parity[k++] = buf8[i] & msk;
8812                                 i++;
8813
8814                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
8815                                         parity[k++] = buf8[i] & msk;
8816                                 i++;
8817                         }
8818                         data[j++] = buf8[i];
8819                 }
8820
8821                 err = -EIO;
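                /* Verify odd parity: a byte with an odd number of set
                 * bits must carry a clear parity bit, and a byte with an
                 * even number of set bits must carry a set one.
                 */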
8822                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
8823                         u8 hw8 = hweight8(data[i]);
8824
8825                         if ((hw8 & 0x1) && parity[i])
8826                                 goto out;
8827                         else if (!(hw8 & 0x1) && !parity[i])
8828                                 goto out;
8829                 }
8830                 err = 0;
8831                 goto out;
8832         }
8833
8834         /* Bootstrap checksum at offset 0x10 */
8835         csum = calc_crc((unsigned char *) buf, 0x10);
8836         if (csum != cpu_to_le32(buf[0x10/4]))
8837                 goto out;
8838
8839         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8840         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8841         if (csum != cpu_to_le32(buf[0xfc/4]))
8842                 goto out;
8843
8844         err = 0;
8845
8846 out:
8847         kfree(buf);
8848         return err;
8849 }
8850
8851 #define TG3_SERDES_TIMEOUT_SEC  2
8852 #define TG3_COPPER_TIMEOUT_SEC  6
8853
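/* Poll carrier state once a second, allowing up to 2 seconds for SerDes
 * links and 6 seconds for copper before declaring the link test failed.
 */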
8854 static int tg3_test_link(struct tg3 *tp)
8855 {
8856         int i, max;
8857
8858         if (!netif_running(tp->dev))
8859                 return -ENODEV;
8860
8861         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8862                 max = TG3_SERDES_TIMEOUT_SEC;
8863         else
8864                 max = TG3_COPPER_TIMEOUT_SEC;
8865
8866         for (i = 0; i < max; i++) {
8867                 if (netif_carrier_ok(tp->dev))
8868                         return 0;
8869
8870                 if (msleep_interruptible(1000))
8871                         break;
8872         }
8873
8874         return -EIO;
8875 }
8876
8877 /* Only test the commonly used registers */
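/* Each table entry carries a read_mask of bits whose value must survive
 * writes unchanged (read-only/status bits) and a write_mask of bits that
 * must accept both 0 and 1; the flags limit an entry to 5705-class,
 * non-5705, non-5788 or non-5750 chips.
 */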
8878 static int tg3_test_registers(struct tg3 *tp)
8879 {
8880         int i, is_5705, is_5750;
8881         u32 offset, read_mask, write_mask, val, save_val, read_val;
8882         static struct {
8883                 u16 offset;
8884                 u16 flags;
8885 #define TG3_FL_5705     0x1
8886 #define TG3_FL_NOT_5705 0x2
8887 #define TG3_FL_NOT_5788 0x4
8888 #define TG3_FL_NOT_5750 0x8
8889                 u32 read_mask;
8890                 u32 write_mask;
8891         } reg_tbl[] = {
8892                 /* MAC Control Registers */
8893                 { MAC_MODE, TG3_FL_NOT_5705,
8894                         0x00000000, 0x00ef6f8c },
8895                 { MAC_MODE, TG3_FL_5705,
8896                         0x00000000, 0x01ef6b8c },
8897                 { MAC_STATUS, TG3_FL_NOT_5705,
8898                         0x03800107, 0x00000000 },
8899                 { MAC_STATUS, TG3_FL_5705,
8900                         0x03800100, 0x00000000 },
8901                 { MAC_ADDR_0_HIGH, 0x0000,
8902                         0x00000000, 0x0000ffff },
8903                 { MAC_ADDR_0_LOW, 0x0000,
8904                         0x00000000, 0xffffffff },
8905                 { MAC_RX_MTU_SIZE, 0x0000,
8906                         0x00000000, 0x0000ffff },
8907                 { MAC_TX_MODE, 0x0000,
8908                         0x00000000, 0x00000070 },
8909                 { MAC_TX_LENGTHS, 0x0000,
8910                         0x00000000, 0x00003fff },
8911                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8912                         0x00000000, 0x000007fc },
8913                 { MAC_RX_MODE, TG3_FL_5705,
8914                         0x00000000, 0x000007dc },
8915                 { MAC_HASH_REG_0, 0x0000,
8916                         0x00000000, 0xffffffff },
8917                 { MAC_HASH_REG_1, 0x0000,
8918                         0x00000000, 0xffffffff },
8919                 { MAC_HASH_REG_2, 0x0000,
8920                         0x00000000, 0xffffffff },
8921                 { MAC_HASH_REG_3, 0x0000,
8922                         0x00000000, 0xffffffff },
8923
8924                 /* Receive Data and Receive BD Initiator Control Registers. */
8925                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8926                         0x00000000, 0xffffffff },
8927                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8928                         0x00000000, 0xffffffff },
8929                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8930                         0x00000000, 0x00000003 },
8931                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8932                         0x00000000, 0xffffffff },
8933                 { RCVDBDI_STD_BD+0, 0x0000,
8934                         0x00000000, 0xffffffff },
8935                 { RCVDBDI_STD_BD+4, 0x0000,
8936                         0x00000000, 0xffffffff },
8937                 { RCVDBDI_STD_BD+8, 0x0000,
8938                         0x00000000, 0xffff0002 },
8939                 { RCVDBDI_STD_BD+0xc, 0x0000,
8940                         0x00000000, 0xffffffff },
8941
8942                 /* Receive BD Initiator Control Registers. */
8943                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8944                         0x00000000, 0xffffffff },
8945                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8946                         0x00000000, 0x000003ff },
8947                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8948                         0x00000000, 0xffffffff },
8949
8950                 /* Host Coalescing Control Registers. */
8951                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8952                         0x00000000, 0x00000004 },
8953                 { HOSTCC_MODE, TG3_FL_5705,
8954                         0x00000000, 0x000000f6 },
8955                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8956                         0x00000000, 0xffffffff },
8957                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8958                         0x00000000, 0x000003ff },
8959                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8960                         0x00000000, 0xffffffff },
8961                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8962                         0x00000000, 0x000003ff },
8963                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8964                         0x00000000, 0xffffffff },
8965                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8966                         0x00000000, 0x000000ff },
8967                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8968                         0x00000000, 0xffffffff },
8969                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8970                         0x00000000, 0x000000ff },
8971                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8972                         0x00000000, 0xffffffff },
8973                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8974                         0x00000000, 0xffffffff },
8975                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8976                         0x00000000, 0xffffffff },
8977                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8978                         0x00000000, 0x000000ff },
8979                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8980                         0x00000000, 0xffffffff },
8981                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8982                         0x00000000, 0x000000ff },
8983                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8984                         0x00000000, 0xffffffff },
8985                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8986                         0x00000000, 0xffffffff },
8987                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8988                         0x00000000, 0xffffffff },
8989                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8990                         0x00000000, 0xffffffff },
8991                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8992                         0x00000000, 0xffffffff },
8993                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8994                         0xffffffff, 0x00000000 },
8995                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8996                         0xffffffff, 0x00000000 },
8997
8998                 /* Buffer Manager Control Registers. */
8999                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9000                         0x00000000, 0x007fff80 },
9001                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9002                         0x00000000, 0x007fffff },
9003                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9004                         0x00000000, 0x0000003f },
9005                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9006                         0x00000000, 0x000001ff },
9007                 { BUFMGR_MB_HIGH_WATER, 0x0000,
9008                         0x00000000, 0x000001ff },
9009                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9010                         0xffffffff, 0x00000000 },
9011                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9012                         0xffffffff, 0x00000000 },
9013
9014                 /* Mailbox Registers */
9015                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9016                         0x00000000, 0x000001ff },
9017                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9018                         0x00000000, 0x000001ff },
9019                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9020                         0x00000000, 0x000007ff },
9021                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9022                         0x00000000, 0x000001ff },
9023
9024                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9025         };
9026
9027         is_5705 = is_5750 = 0;
9028         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9029                 is_5705 = 1;
9030                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9031                         is_5750 = 1;
9032         }
9033
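         /* Walk the register table, skipping entries that do not apply to
          * this chip (5705/5750/5788 variants), and check each register's
          * read-only bits are preserved and its read/write bits respond to
          * all-zero and all-ones writes through the entry's masks.
          */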
9034         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9035                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9036                         continue;
9037
9038                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9039                         continue;
9040
9041                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9042                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
9043                         continue;
9044
9045                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9046                         continue;
9047
9048                 offset = (u32) reg_tbl[i].offset;
9049                 read_mask = reg_tbl[i].read_mask;
9050                 write_mask = reg_tbl[i].write_mask;
9051
9052                 /* Save the original register content */
9053                 save_val = tr32(offset);
9054
9055                 /* Determine the read-only value. */
9056                 read_val = save_val & read_mask;
9057
9058                 /* Write zero to the register, then make sure the read-only bits
9059                  * are not changed and the read/write bits are all zeros.
9060                  */
9061                 tw32(offset, 0);
9062
9063                 val = tr32(offset);
9064
9065                 /* Test the read-only and read/write bits. */
9066                 if (((val & read_mask) != read_val) || (val & write_mask))
9067                         goto out;
9068
9069                 /* Write ones to all the bits defined by RdMask and WrMask, then
9070                  * make sure the read-only bits are not changed and the
9071                  * read/write bits are all ones.
9072                  */
9073                 tw32(offset, read_mask | write_mask);
9074
9075                 val = tr32(offset);
9076
9077                 /* Test the read-only bits. */
9078                 if ((val & read_mask) != read_val)
9079                         goto out;
9080
9081                 /* Test the read/write bits. */
9082                 if ((val & write_mask) != write_mask)
9083                         goto out;
9084
9085                 tw32(offset, save_val);
9086         }
9087
9088         return 0;
9089
9090 out:
9091         if (netif_msg_hw(tp))
9092                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9093                        offset);
9094         tw32(offset, save_val);
9095         return -EIO;
9096 }
9097
9098 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9099 {
9100         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9101         int i;
9102         u32 j;
9103
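         /* Write each test pattern to every 32-bit word in the region and
          * read it back; any mismatch fails the memory test.
          */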
9104         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9105                 for (j = 0; j < len; j += 4) {
9106                         u32 val;
9107
9108                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9109                         tg3_read_mem(tp, offset + j, &val);
9110                         if (val != test_pattern[i])
9111                                 return -EIO;
9112                 }
9113         }
9114         return 0;
9115 }
9116
9117 static int tg3_test_memory(struct tg3 *tp)
9118 {
9119         static struct mem_entry {
9120                 u32 offset;
9121                 u32 len;
9122         } mem_tbl_570x[] = {
9123                 { 0x00000000, 0x00b50},
9124                 { 0x00002000, 0x1c000},
9125                 { 0xffffffff, 0x00000}
9126         }, mem_tbl_5705[] = {
9127                 { 0x00000100, 0x0000c},
9128                 { 0x00000200, 0x00008},
9129                 { 0x00004000, 0x00800},
9130                 { 0x00006000, 0x01000},
9131                 { 0x00008000, 0x02000},
9132                 { 0x00010000, 0x0e000},
9133                 { 0xffffffff, 0x00000}
9134         }, mem_tbl_5755[] = {
9135                 { 0x00000200, 0x00008},
9136                 { 0x00004000, 0x00800},
9137                 { 0x00006000, 0x00800},
9138                 { 0x00008000, 0x02000},
9139                 { 0x00010000, 0x0c000},
9140                 { 0xffffffff, 0x00000}
9141         }, mem_tbl_5906[] = {
9142                 { 0x00000200, 0x00008},
9143                 { 0x00004000, 0x00400},
9144                 { 0x00006000, 0x00400},
9145                 { 0x00008000, 0x01000},
9146                 { 0x00010000, 0x01000},
9147                 { 0xffffffff, 0x00000}
9148         };
9149         struct mem_entry *mem_tbl;
9150         int err = 0;
9151         int i;
9152
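         /* Pick the SRAM test table that matches this chip family's
          * internal memory map.
          */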
9153         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9154                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9155                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9156                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9157                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9158                         mem_tbl = mem_tbl_5755;
9159                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9160                         mem_tbl = mem_tbl_5906;
9161                 else
9162                         mem_tbl = mem_tbl_5705;
9163         } else
9164                 mem_tbl = mem_tbl_570x;
9165
9166         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9167                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9168                     mem_tbl[i].len)) != 0)
9169                         break;
9170         }
9171
9172         return err;
9173 }
9174
9175 #define TG3_MAC_LOOPBACK        0
9176 #define TG3_PHY_LOOPBACK        1
9177
9178 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9179 {
9180         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9181         u32 desc_idx;
9182         struct sk_buff *skb, *rx_skb;
9183         u8 *tx_data;
9184         dma_addr_t map;
9185         int num_pkts, tx_len, rx_len, i, err;
9186         struct tg3_rx_buffer_desc *desc;
9187
9188         if (loopback_mode == TG3_MAC_LOOPBACK) {
9189                 /* HW errata - mac loopback fails in some cases on 5780.
9190                  * Normal traffic and PHY loopback are not affected by
9191                  * errata.
9192                  */
9193                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9194                         return 0;
9195
9196                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9197                            MAC_MODE_PORT_INT_LPBACK;
9198                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9199                         mac_mode |= MAC_MODE_LINK_POLARITY;
9200                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9201                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9202                 else
9203                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9204                 tw32(MAC_MODE, mac_mode);
9205         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9206                 u32 val;
9207
9208                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9209                         u32 phytest;
9210
9211                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9212                                 u32 phy;
9213
9214                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9215                                              phytest | MII_TG3_EPHY_SHADOW_EN);
9216                                 if (!tg3_readphy(tp, 0x1b, &phy))
9217                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
9218                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9219                         }
9220                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9221                 } else
9222                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9223
9224                 tg3_phy_toggle_automdix(tp, 0);
9225
9226                 tg3_writephy(tp, MII_BMCR, val);
9227                 udelay(40);
9228
9229                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9230                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9231                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9232                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9233                 } else
9234                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9235
9236                 /* reset to prevent losing 1st rx packet intermittently */
9237                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9238                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9239                         udelay(10);
9240                         tw32_f(MAC_RX_MODE, tp->rx_mode);
9241                 }
9242                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9243                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9244                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9245                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9246                                 mac_mode |= MAC_MODE_LINK_POLARITY;
9247                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
9248                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9249                 }
9250                 tw32(MAC_MODE, mac_mode);
9251         }
9252         else
9253                 return -EINVAL;
9254
9255         err = -EIO;
9256
9257         tx_len = 1514;
9258         skb = netdev_alloc_skb(tp->dev, tx_len);
9259         if (!skb)
9260                 return -ENOMEM;
9261
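         /* Build the test frame: destination MAC set to our own address,
          * the next 8 bytes zeroed, and the rest of the 1514-byte frame
          * (from offset 14) filled with an incrementing byte pattern that
          * is verified when the frame is looped back.
          */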
9262         tx_data = skb_put(skb, tx_len);
9263         memcpy(tx_data, tp->dev->dev_addr, 6);
9264         memset(tx_data + 6, 0x0, 8);
9265
9266         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9267
9268         for (i = 14; i < tx_len; i++)
9269                 tx_data[i] = (u8) (i & 0xff);
9270
9271         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9272
9273         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9274              HOSTCC_MODE_NOW);
9275
9276         udelay(10);
9277
9278         rx_start_idx = tp->hw_status->idx[0].rx_producer;
9279
9280         num_pkts = 0;
9281
9282         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9283
9284         tp->tx_prod++;
9285         num_pkts++;
9286
9287         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9288                      tp->tx_prod);
9289         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9290
9291         udelay(10);
9292
9293         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
9294         for (i = 0; i < 25; i++) {
9295                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9296                        HOSTCC_MODE_NOW);
9297
9298                 udelay(10);
9299
9300                 tx_idx = tp->hw_status->idx[0].tx_consumer;
9301                 rx_idx = tp->hw_status->idx[0].rx_producer;
9302                 if ((tx_idx == tp->tx_prod) &&
9303                     (rx_idx == (rx_start_idx + num_pkts)))
9304                         break;
9305         }
9306
9307         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9308         dev_kfree_skb(skb);
9309
9310         if (tx_idx != tp->tx_prod)
9311                 goto out;
9312
9313         if (rx_idx != rx_start_idx + num_pkts)
9314                 goto out;
9315
9316         desc = &tp->rx_rcb[rx_start_idx];
9317         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9318         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9319         if (opaque_key != RXD_OPAQUE_RING_STD)
9320                 goto out;
9321
9322         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9323             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9324                 goto out;
9325
9326         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9327         if (rx_len != tx_len)
9328                 goto out;
9329
9330         rx_skb = tp->rx_std_buffers[desc_idx].skb;
9331
9332         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9333         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9334
9335         for (i = 14; i < tx_len; i++) {
9336                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9337                         goto out;
9338         }
9339         err = 0;
9340
9341         /* tg3_free_rings will unmap and free the rx_skb */
9342 out:
9343         return err;
9344 }
9345
9346 #define TG3_MAC_LOOPBACK_FAILED         1
9347 #define TG3_PHY_LOOPBACK_FAILED         2
9348 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9349                                          TG3_PHY_LOOPBACK_FAILED)
9350
9351 static int tg3_test_loopback(struct tg3 *tp)
9352 {
9353         int err = 0;
9354         u32 cpmuctrl = 0;
9355
9356         if (!netif_running(tp->dev))
9357                 return TG3_LOOPBACK_FAILED;
9358
9359         err = tg3_reset_hw(tp, 1);
9360         if (err)
9361                 return TG3_LOOPBACK_FAILED;
9362
9363         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
9364             tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
9365                 int i;
9366                 u32 status;
9367
9368                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9369
9370                 /* Wait for up to 40 microseconds to acquire lock. */
9371                 for (i = 0; i < 4; i++) {
9372                         status = tr32(TG3_CPMU_MUTEX_GNT);
9373                         if (status == CPMU_MUTEX_GNT_DRIVER)
9374                                 break;
9375                         udelay(10);
9376                 }
9377
9378                 if (status != CPMU_MUTEX_GNT_DRIVER)
9379                         return TG3_LOOPBACK_FAILED;
9380
9381                 /* Turn off power management based on link speed. */
9382                 cpmuctrl = tr32(TG3_CPMU_CTRL);
9383                 tw32(TG3_CPMU_CTRL,
9384                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9385                                   CPMU_CTRL_LINK_AWARE_MODE));
9386         }
9387
9388         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9389                 err |= TG3_MAC_LOOPBACK_FAILED;
9390
9391         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
9392             tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
9393                 tw32(TG3_CPMU_CTRL, cpmuctrl);
9394
9395                 /* Release the mutex */
9396                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9397         }
9398
9399         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9400                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9401                         err |= TG3_PHY_LOOPBACK_FAILED;
9402         }
9403
9404         return err;
9405 }
9406
9407 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9408                           u64 *data)
9409 {
9410         struct tg3 *tp = netdev_priv(dev);
9411
9412         if (tp->link_config.phy_is_low_power)
9413                 tg3_set_power_state(tp, PCI_D0);
9414
9415         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9416
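         /* Results are reported in fixed slots: data[0] NVRAM, data[1] link,
          * data[2] registers, data[3] memory, data[4] loopback (bitmask of
          * MAC/PHY loopback failures), data[5] interrupt test.
          */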
9417         if (tg3_test_nvram(tp) != 0) {
9418                 etest->flags |= ETH_TEST_FL_FAILED;
9419                 data[0] = 1;
9420         }
9421         if (tg3_test_link(tp) != 0) {
9422                 etest->flags |= ETH_TEST_FL_FAILED;
9423                 data[1] = 1;
9424         }
9425         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9426                 int err, irq_sync = 0;
9427
9428                 if (netif_running(dev)) {
9429                         tg3_netif_stop(tp);
9430                         irq_sync = 1;
9431                 }
9432
9433                 tg3_full_lock(tp, irq_sync);
9434
9435                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
9436                 err = tg3_nvram_lock(tp);
9437                 tg3_halt_cpu(tp, RX_CPU_BASE);
9438                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9439                         tg3_halt_cpu(tp, TX_CPU_BASE);
9440                 if (!err)
9441                         tg3_nvram_unlock(tp);
9442
9443                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9444                         tg3_phy_reset(tp);
9445
9446                 if (tg3_test_registers(tp) != 0) {
9447                         etest->flags |= ETH_TEST_FL_FAILED;
9448                         data[2] = 1;
9449                 }
9450                 if (tg3_test_memory(tp) != 0) {
9451                         etest->flags |= ETH_TEST_FL_FAILED;
9452                         data[3] = 1;
9453                 }
9454                 if ((data[4] = tg3_test_loopback(tp)) != 0)
9455                         etest->flags |= ETH_TEST_FL_FAILED;
9456
9457                 tg3_full_unlock(tp);
9458
9459                 if (tg3_test_interrupt(tp) != 0) {
9460                         etest->flags |= ETH_TEST_FL_FAILED;
9461                         data[5] = 1;
9462                 }
9463
9464                 tg3_full_lock(tp, 0);
9465
9466                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9467                 if (netif_running(dev)) {
9468                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9469                         if (!tg3_restart_hw(tp, 1))
9470                                 tg3_netif_start(tp);
9471                 }
9472
9473                 tg3_full_unlock(tp);
9474         }
9475         if (tp->link_config.phy_is_low_power)
9476                 tg3_set_power_state(tp, PCI_D3hot);
9477
9478 }
9479
9480 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9481 {
9482         struct mii_ioctl_data *data = if_mii(ifr);
9483         struct tg3 *tp = netdev_priv(dev);
9484         int err;
9485
9486         switch(cmd) {
9487         case SIOCGMIIPHY:
9488                 data->phy_id = PHY_ADDR;
9489
9490                 /* fallthru */
9491         case SIOCGMIIREG: {
9492                 u32 mii_regval;
9493
9494                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9495                         break;                  /* We have no PHY */
9496
9497                 if (tp->link_config.phy_is_low_power)
9498                         return -EAGAIN;
9499
9500                 spin_lock_bh(&tp->lock);
9501                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9502                 spin_unlock_bh(&tp->lock);
9503
9504                 data->val_out = mii_regval;
9505
9506                 return err;
9507         }
9508
9509         case SIOCSMIIREG:
9510                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9511                         break;                  /* We have no PHY */
9512
9513                 if (!capable(CAP_NET_ADMIN))
9514                         return -EPERM;
9515
9516                 if (tp->link_config.phy_is_low_power)
9517                         return -EAGAIN;
9518
9519                 spin_lock_bh(&tp->lock);
9520                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9521                 spin_unlock_bh(&tp->lock);
9522
9523                 return err;
9524
9525         default:
9526                 /* do nothing */
9527                 break;
9528         }
9529         return -EOPNOTSUPP;
9530 }
9531
9532 #if TG3_VLAN_TAG_USED
9533 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9534 {
9535         struct tg3 *tp = netdev_priv(dev);
9536
9537         if (netif_running(dev))
9538                 tg3_netif_stop(tp);
9539
9540         tg3_full_lock(tp, 0);
9541
9542         tp->vlgrp = grp;
9543
9544         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9545         __tg3_set_rx_mode(dev);
9546
9547         if (netif_running(dev))
9548                 tg3_netif_start(tp);
9549
9550         tg3_full_unlock(tp);
9551 }
9552 #endif
9553
9554 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9555 {
9556         struct tg3 *tp = netdev_priv(dev);
9557
9558         memcpy(ec, &tp->coal, sizeof(*ec));
9559         return 0;
9560 }
9561
9562 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9563 {
9564         struct tg3 *tp = netdev_priv(dev);
9565         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9566         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9567
9568         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9569                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9570                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9571                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9572                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9573         }
9574
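         /* 5705 and newer chips do not support the per-IRQ tick and
          * statistics-block coalescing parameters, so their limits stay
          * zero and any nonzero request for them is rejected below.
          */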
9575         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9576             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9577             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9578             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9579             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9580             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9581             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9582             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9583             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9584             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9585                 return -EINVAL;
9586
9587         /* No rx interrupts will be generated if both are zero */
9588         if ((ec->rx_coalesce_usecs == 0) &&
9589             (ec->rx_max_coalesced_frames == 0))
9590                 return -EINVAL;
9591
9592         /* No tx interrupts will be generated if both are zero */
9593         if ((ec->tx_coalesce_usecs == 0) &&
9594             (ec->tx_max_coalesced_frames == 0))
9595                 return -EINVAL;
9596
9597         /* Only copy relevant parameters, ignore all others. */
9598         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9599         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9600         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9601         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9602         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9603         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9604         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9605         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9606         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9607
9608         if (netif_running(dev)) {
9609                 tg3_full_lock(tp, 0);
9610                 __tg3_set_coalesce(tp, &tp->coal);
9611                 tg3_full_unlock(tp);
9612         }
9613         return 0;
9614 }
9615
9616 static const struct ethtool_ops tg3_ethtool_ops = {
9617         .get_settings           = tg3_get_settings,
9618         .set_settings           = tg3_set_settings,
9619         .get_drvinfo            = tg3_get_drvinfo,
9620         .get_regs_len           = tg3_get_regs_len,
9621         .get_regs               = tg3_get_regs,
9622         .get_wol                = tg3_get_wol,
9623         .set_wol                = tg3_set_wol,
9624         .get_msglevel           = tg3_get_msglevel,
9625         .set_msglevel           = tg3_set_msglevel,
9626         .nway_reset             = tg3_nway_reset,
9627         .get_link               = ethtool_op_get_link,
9628         .get_eeprom_len         = tg3_get_eeprom_len,
9629         .get_eeprom             = tg3_get_eeprom,
9630         .set_eeprom             = tg3_set_eeprom,
9631         .get_ringparam          = tg3_get_ringparam,
9632         .set_ringparam          = tg3_set_ringparam,
9633         .get_pauseparam         = tg3_get_pauseparam,
9634         .set_pauseparam         = tg3_set_pauseparam,
9635         .get_rx_csum            = tg3_get_rx_csum,
9636         .set_rx_csum            = tg3_set_rx_csum,
9637         .set_tx_csum            = tg3_set_tx_csum,
9638         .set_sg                 = ethtool_op_set_sg,
9639         .set_tso                = tg3_set_tso,
9640         .self_test              = tg3_self_test,
9641         .get_strings            = tg3_get_strings,
9642         .phys_id                = tg3_phys_id,
9643         .get_ethtool_stats      = tg3_get_ethtool_stats,
9644         .get_coalesce           = tg3_get_coalesce,
9645         .set_coalesce           = tg3_set_coalesce,
9646         .get_sset_count         = tg3_get_sset_count,
9647 };
9648
9649 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9650 {
9651         u32 cursize, val, magic;
9652
9653         tp->nvram_size = EEPROM_CHIP_SIZE;
9654
9655         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9656                 return;
9657
9658         if ((magic != TG3_EEPROM_MAGIC) &&
9659             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9660             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9661                 return;
9662
9663         /*
9664          * Size the chip by reading offsets at increasing powers of two.
9665          * When we encounter our validation signature, we know the addressing
9666          * has wrapped around, and thus have our chip size.
9667          */
9668         cursize = 0x10;
9669
9670         while (cursize < tp->nvram_size) {
9671                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9672                         return;
9673
9674                 if (val == magic)
9675                         break;
9676
9677                 cursize <<= 1;
9678         }
9679
9680         tp->nvram_size = cursize;
9681 }
9682
9683 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9684 {
9685         u32 val;
9686
9687         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9688                 return;
9689
9690         /* Selfboot format */
9691         if (val != TG3_EEPROM_MAGIC) {
9692                 tg3_get_eeprom_size(tp);
9693                 return;
9694         }
9695
9696         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9697                 if (val != 0) {
9698                         tp->nvram_size = (val >> 16) * 1024;
9699                         return;
9700                 }
9701         }
9702         tp->nvram_size = 0x80000;
9703 }
9704
9705 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9706 {
9707         u32 nvcfg1;
9708
9709         nvcfg1 = tr32(NVRAM_CFG1);
9710         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9711                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9712         }
9713         else {
9714                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9715                 tw32(NVRAM_CFG1, nvcfg1);
9716         }
9717
9718         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9719             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9720                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9721                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9722                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9723                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9724                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9725                                 break;
9726                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9727                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9728                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9729                                 break;
9730                         case FLASH_VENDOR_ATMEL_EEPROM:
9731                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9732                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9733                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9734                                 break;
9735                         case FLASH_VENDOR_ST:
9736                                 tp->nvram_jedecnum = JEDEC_ST;
9737                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9738                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9739                                 break;
9740                         case FLASH_VENDOR_SAIFUN:
9741                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9742                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9743                                 break;
9744                         case FLASH_VENDOR_SST_SMALL:
9745                         case FLASH_VENDOR_SST_LARGE:
9746                                 tp->nvram_jedecnum = JEDEC_SST;
9747                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9748                                 break;
9749                 }
9750         }
9751         else {
9752                 tp->nvram_jedecnum = JEDEC_ATMEL;
9753                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9754                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9755         }
9756 }
9757
9758 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9759 {
9760         u32 nvcfg1;
9761
9762         nvcfg1 = tr32(NVRAM_CFG1);
9763
9764         /* NVRAM protection for TPM */
9765         if (nvcfg1 & (1 << 27))
9766                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9767
9768         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9769                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9770                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9771                         tp->nvram_jedecnum = JEDEC_ATMEL;
9772                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9773                         break;
9774                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9775                         tp->nvram_jedecnum = JEDEC_ATMEL;
9776                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9777                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9778                         break;
9779                 case FLASH_5752VENDOR_ST_M45PE10:
9780                 case FLASH_5752VENDOR_ST_M45PE20:
9781                 case FLASH_5752VENDOR_ST_M45PE40:
9782                         tp->nvram_jedecnum = JEDEC_ST;
9783                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9784                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9785                         break;
9786         }
9787
9788         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9789                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9790                         case FLASH_5752PAGE_SIZE_256:
9791                                 tp->nvram_pagesize = 256;
9792                                 break;
9793                         case FLASH_5752PAGE_SIZE_512:
9794                                 tp->nvram_pagesize = 512;
9795                                 break;
9796                         case FLASH_5752PAGE_SIZE_1K:
9797                                 tp->nvram_pagesize = 1024;
9798                                 break;
9799                         case FLASH_5752PAGE_SIZE_2K:
9800                                 tp->nvram_pagesize = 2048;
9801                                 break;
9802                         case FLASH_5752PAGE_SIZE_4K:
9803                                 tp->nvram_pagesize = 4096;
9804                                 break;
9805                         case FLASH_5752PAGE_SIZE_264:
9806                                 tp->nvram_pagesize = 264;
9807                                 break;
9808                 }
9809         }
9810         else {
9811                 /* For eeprom, set pagesize to maximum eeprom size */
9812                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9813
9814                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9815                 tw32(NVRAM_CFG1, nvcfg1);
9816         }
9817 }
9818
9819 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9820 {
9821         u32 nvcfg1, protect = 0;
9822
9823         nvcfg1 = tr32(NVRAM_CFG1);
9824
9825         /* NVRAM protection for TPM */
9826         if (nvcfg1 & (1 << 27)) {
9827                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9828                 protect = 1;
9829         }
9830
9831         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9832         switch (nvcfg1) {
9833                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9834                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9835                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9836                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9837                         tp->nvram_jedecnum = JEDEC_ATMEL;
9838                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9839                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9840                         tp->nvram_pagesize = 264;
9841                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9842                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9843                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9844                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9845                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9846                         else
9847                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9848                         break;
9849                 case FLASH_5752VENDOR_ST_M45PE10:
9850                 case FLASH_5752VENDOR_ST_M45PE20:
9851                 case FLASH_5752VENDOR_ST_M45PE40:
9852                         tp->nvram_jedecnum = JEDEC_ST;
9853                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9854                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9855                         tp->nvram_pagesize = 256;
9856                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9857                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9858                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9859                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9860                         else
9861                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9862                         break;
9863         }
9864 }
9865
9866 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9867 {
9868         u32 nvcfg1;
9869
9870         nvcfg1 = tr32(NVRAM_CFG1);
9871
9872         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9873                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9874                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9875                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9876                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9877                         tp->nvram_jedecnum = JEDEC_ATMEL;
9878                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9879                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9880
9881                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9882                         tw32(NVRAM_CFG1, nvcfg1);
9883                         break;
9884                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9885                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9886                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9887                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9888                         tp->nvram_jedecnum = JEDEC_ATMEL;
9889                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9890                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9891                         tp->nvram_pagesize = 264;
9892                         break;
9893                 case FLASH_5752VENDOR_ST_M45PE10:
9894                 case FLASH_5752VENDOR_ST_M45PE20:
9895                 case FLASH_5752VENDOR_ST_M45PE40:
9896                         tp->nvram_jedecnum = JEDEC_ST;
9897                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9898                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9899                         tp->nvram_pagesize = 256;
9900                         break;
9901         }
9902 }
9903
9904 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
9905 {
9906         u32 nvcfg1, protect = 0;
9907
9908         nvcfg1 = tr32(NVRAM_CFG1);
9909
9910         /* NVRAM protection for TPM */
9911         if (nvcfg1 & (1 << 27)) {
9912                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9913                 protect = 1;
9914         }
9915
9916         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9917         switch (nvcfg1) {
9918                 case FLASH_5761VENDOR_ATMEL_ADB021D:
9919                 case FLASH_5761VENDOR_ATMEL_ADB041D:
9920                 case FLASH_5761VENDOR_ATMEL_ADB081D:
9921                 case FLASH_5761VENDOR_ATMEL_ADB161D:
9922                 case FLASH_5761VENDOR_ATMEL_MDB021D:
9923                 case FLASH_5761VENDOR_ATMEL_MDB041D:
9924                 case FLASH_5761VENDOR_ATMEL_MDB081D:
9925                 case FLASH_5761VENDOR_ATMEL_MDB161D:
9926                         tp->nvram_jedecnum = JEDEC_ATMEL;
9927                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9928                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9929                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
9930                         tp->nvram_pagesize = 256;
9931                         break;
9932                 case FLASH_5761VENDOR_ST_A_M45PE20:
9933                 case FLASH_5761VENDOR_ST_A_M45PE40:
9934                 case FLASH_5761VENDOR_ST_A_M45PE80:
9935                 case FLASH_5761VENDOR_ST_A_M45PE16:
9936                 case FLASH_5761VENDOR_ST_M_M45PE20:
9937                 case FLASH_5761VENDOR_ST_M_M45PE40:
9938                 case FLASH_5761VENDOR_ST_M_M45PE80:
9939                 case FLASH_5761VENDOR_ST_M_M45PE16:
9940                         tp->nvram_jedecnum = JEDEC_ST;
9941                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9942                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9943                         tp->nvram_pagesize = 256;
9944                         break;
9945         }
9946
9947         if (protect) {
9948                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
9949         } else {
9950                 switch (nvcfg1) {
9951                         case FLASH_5761VENDOR_ATMEL_ADB161D:
9952                         case FLASH_5761VENDOR_ATMEL_MDB161D:
9953                         case FLASH_5761VENDOR_ST_A_M45PE16:
9954                         case FLASH_5761VENDOR_ST_M_M45PE16:
9955                                 tp->nvram_size = 0x100000;
9956                                 break;
9957                         case FLASH_5761VENDOR_ATMEL_ADB081D:
9958                         case FLASH_5761VENDOR_ATMEL_MDB081D:
9959                         case FLASH_5761VENDOR_ST_A_M45PE80:
9960                         case FLASH_5761VENDOR_ST_M_M45PE80:
9961                                 tp->nvram_size = 0x80000;
9962                                 break;
9963                         case FLASH_5761VENDOR_ATMEL_ADB041D:
9964                         case FLASH_5761VENDOR_ATMEL_MDB041D:
9965                         case FLASH_5761VENDOR_ST_A_M45PE40:
9966                         case FLASH_5761VENDOR_ST_M_M45PE40:
9967                                 tp->nvram_size = 0x40000;
9968                                 break;
9969                         case FLASH_5761VENDOR_ATMEL_ADB021D:
9970                         case FLASH_5761VENDOR_ATMEL_MDB021D:
9971                         case FLASH_5761VENDOR_ST_A_M45PE20:
9972                         case FLASH_5761VENDOR_ST_M_M45PE20:
9973                                 tp->nvram_size = 0x20000;
9974                                 break;
9975                 }
9976         }
9977 }
9978
9979 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9980 {
9981         tp->nvram_jedecnum = JEDEC_ATMEL;
9982         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9983         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9984 }
9985
9986 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9987 static void __devinit tg3_nvram_init(struct tg3 *tp)
9988 {
9989         tw32_f(GRC_EEPROM_ADDR,
9990              (EEPROM_ADDR_FSM_RESET |
9991               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9992                EEPROM_ADDR_CLKPERD_SHIFT)));
9993
9994         msleep(1);
9995
9996         /* Enable seeprom accesses. */
9997         tw32_f(GRC_LOCAL_CTRL,
9998              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9999         udelay(100);
10000
10001         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10002             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10003                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10004
10005                 if (tg3_nvram_lock(tp)) {
10007                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10007                                "tg3_nvram_init failed.\n", tp->dev->name);
10008                         return;
10009                 }
10010                 tg3_enable_nvram_access(tp);
10011
10012                 tp->nvram_size = 0;
10013
10014                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10015                         tg3_get_5752_nvram_info(tp);
10016                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10017                         tg3_get_5755_nvram_info(tp);
10018                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10019                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10020                         tg3_get_5787_nvram_info(tp);
10021                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10022                         tg3_get_5761_nvram_info(tp);
10023                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10024                         tg3_get_5906_nvram_info(tp);
10025                 else
10026                         tg3_get_nvram_info(tp);
10027
10028                 if (tp->nvram_size == 0)
10029                         tg3_get_nvram_size(tp);
10030
10031                 tg3_disable_nvram_access(tp);
10032                 tg3_nvram_unlock(tp);
10033
10034         } else {
10035                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10036
10037                 tg3_get_eeprom_size(tp);
10038         }
10039 }
10040
10041 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10042                                         u32 offset, u32 *val)
10043 {
10044         u32 tmp;
10045         int i;
10046
10047         if (offset > EEPROM_ADDR_ADDR_MASK ||
10048             (offset % 4) != 0)
10049                 return -EINVAL;
10050
10051         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10052                                         EEPROM_ADDR_DEVID_MASK |
10053                                         EEPROM_ADDR_READ);
10054         tw32(GRC_EEPROM_ADDR,
10055              tmp |
10056              (0 << EEPROM_ADDR_DEVID_SHIFT) |
10057              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10058               EEPROM_ADDR_ADDR_MASK) |
10059              EEPROM_ADDR_READ | EEPROM_ADDR_START);
10060
10061         for (i = 0; i < 1000; i++) {
10062                 tmp = tr32(GRC_EEPROM_ADDR);
10063
10064                 if (tmp & EEPROM_ADDR_COMPLETE)
10065                         break;
10066                 msleep(1);
10067         }
10068         if (!(tmp & EEPROM_ADDR_COMPLETE))
10069                 return -EBUSY;
10070
10071         *val = tr32(GRC_EEPROM_DATA);
10072         return 0;
10073 }
10074
10075 #define NVRAM_CMD_TIMEOUT 10000
10076
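/* Issue a command to the NVRAM state machine and poll for NVRAM_CMD_DONE;
 * at 10 usec per poll the timeout above bounds the wait at roughly 100 ms.
 */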
10077 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10078 {
10079         int i;
10080
10081         tw32(NVRAM_CMD, nvram_cmd);
10082         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10083                 udelay(10);
10084                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10085                         udelay(10);
10086                         break;
10087                 }
10088         }
10089         if (i == NVRAM_CMD_TIMEOUT) {
10090                 return -EBUSY;
10091         }
10092         return 0;
10093 }
10094
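/* Atmel AT45DB-style buffered flash parts address memory as pages (typically
 * 264 bytes) selected by the upper address bits, so linear NVRAM offsets are
 * translated into the page/offset form the part expects, and back again.
 * Illustrative example: with a 264-byte page size, linear offset 1000 is
 * page 3, byte 208, i.e. (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208.
 */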
10095 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10096 {
10097         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10098             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10099             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10100            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10101             (tp->nvram_jedecnum == JEDEC_ATMEL))
10102
10103                 addr = ((addr / tp->nvram_pagesize) <<
10104                         ATMEL_AT45DB0X1B_PAGE_POS) +
10105                        (addr % tp->nvram_pagesize);
10106
10107         return addr;
10108 }
10109
10110 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10111 {
10112         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10113             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10114             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10115            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10116             (tp->nvram_jedecnum == JEDEC_ATMEL))
10117
10118                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10119                         tp->nvram_pagesize) +
10120                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10121
10122         return addr;
10123 }
10124
10125 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10126 {
10127         int ret;
10128
10129         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10130                 return tg3_nvram_read_using_eeprom(tp, offset, val);
10131
10132         offset = tg3_nvram_phys_addr(tp, offset);
10133
10134         if (offset > NVRAM_ADDR_MSK)
10135                 return -EINVAL;
10136
10137         ret = tg3_nvram_lock(tp);
10138         if (ret)
10139                 return ret;
10140
10141         tg3_enable_nvram_access(tp);
10142
10143         tw32(NVRAM_ADDR, offset);
10144         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10145                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10146
10147         if (ret == 0)
10148                 *val = swab32(tr32(NVRAM_RDDATA));
10149
10150         tg3_disable_nvram_access(tp);
10151
10152         tg3_nvram_unlock(tp);
10153
10154         return ret;
10155 }
10156
10157 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10158 {
10159         int err;
10160         u32 tmp;
10161
10162         err = tg3_nvram_read(tp, offset, &tmp);
10163         *val = swab32(tmp);
10164         return err;
10165 }
10166
10167 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10168                                     u32 offset, u32 len, u8 *buf)
10169 {
10170         int i, j, rc = 0;
10171         u32 val;
10172
10173         for (i = 0; i < len; i += 4) {
10174                 u32 addr, data;
10175
10176                 addr = offset + i;
10177
10178                 memcpy(&data, buf + i, 4);
10179
10180                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
10181
10182                 val = tr32(GRC_EEPROM_ADDR);
10183                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10184
10185                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10186                         EEPROM_ADDR_READ);
10187                 tw32(GRC_EEPROM_ADDR, val |
10188                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
10189                         (addr & EEPROM_ADDR_ADDR_MASK) |
10190                         EEPROM_ADDR_START |
10191                         EEPROM_ADDR_WRITE);
10192
10193                 for (j = 0; j < 1000; j++) {
10194                         val = tr32(GRC_EEPROM_ADDR);
10195
10196                         if (val & EEPROM_ADDR_COMPLETE)
10197                                 break;
10198                         msleep(1);
10199                 }
10200                 if (!(val & EEPROM_ADDR_COMPLETE)) {
10201                         rc = -EBUSY;
10202                         break;
10203                 }
10204         }
10205
10206         return rc;
10207 }
10208
10209 /* offset and length are dword aligned */
10210 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10211                 u8 *buf)
10212 {
10213         int ret = 0;
10214         u32 pagesize = tp->nvram_pagesize;
10215         u32 pagemask = pagesize - 1;
10216         u32 nvram_cmd;
10217         u8 *tmp;
10218
10219         tmp = kmalloc(pagesize, GFP_KERNEL);
10220         if (tmp == NULL)
10221                 return -ENOMEM;
10222
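          /* Unbuffered flash must be erased a page at a time, so each page
           * touched by the request is read into the bounce buffer, merged
           * with the new data, erased, and then rewritten word by word.
           */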
10223         while (len) {
10224                 int j;
10225                 u32 phy_addr, page_off, size;
10226
10227                 phy_addr = offset & ~pagemask;
10228
10229                 for (j = 0; j < pagesize; j += 4) {
10230                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
10231                                                 (u32 *) (tmp + j))))
10232                                 break;
10233                 }
10234                 if (ret)
10235                         break;
10236
10237                 page_off = offset & pagemask;
10238                 size = pagesize;
10239                 if (len < size)
10240                         size = len;
10241
10242                 len -= size;
10243
10244                 memcpy(tmp + page_off, buf, size);
10245
10246                 offset = offset + (pagesize - page_off);
10247
10248                 tg3_enable_nvram_access(tp);
10249
10250                 /*
10251                  * Before we can erase the flash page, we need
10252                  * to issue a special "write enable" command.
10253                  */
10254                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10255
10256                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10257                         break;
10258
10259                 /* Erase the target page */
10260                 tw32(NVRAM_ADDR, phy_addr);
10261
10262                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10263                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10264
10265                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10266                         break;
10267
10268                 /* Issue another write enable to start the write. */
10269                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10270
10271                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10272                         break;
10273
10274                 for (j = 0; j < pagesize; j += 4) {
10275                         u32 data;
10276
10277                         data = *((u32 *) (tmp + j));
10278                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
10279
10280                         tw32(NVRAM_ADDR, phy_addr + j);
10281
10282                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10283                                 NVRAM_CMD_WR;
10284
10285                         if (j == 0)
10286                                 nvram_cmd |= NVRAM_CMD_FIRST;
10287                         else if (j == (pagesize - 4))
10288                                 nvram_cmd |= NVRAM_CMD_LAST;
10289
10290                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10291                                 break;
10292                 }
10293                 if (ret)
10294                         break;
10295         }
10296
10297         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10298         tg3_nvram_exec_cmd(tp, nvram_cmd);
10299
10300         kfree(tmp);
10301
10302         return ret;
10303 }
10304
10305 /* offset and length are dword aligned */
10306 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10307                 u8 *buf)
10308 {
10309         int i, ret = 0;
10310
10311         for (i = 0; i < len; i += 4, offset += 4) {
10312                 u32 data, page_off, phy_addr, nvram_cmd;
10313
10314                 memcpy(&data, buf + i, 4);
10315                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
10316
10317                 page_off = offset % tp->nvram_pagesize;
10318
10319                 phy_addr = tg3_nvram_phys_addr(tp, offset);
10320
10321                 tw32(NVRAM_ADDR, phy_addr);
10322
10323                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10324
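                  /* NVRAM_CMD_FIRST marks the start of a programming burst
                   * (the first word written or a page boundary), and
                   * NVRAM_CMD_LAST marks the end (the final word written or
                   * the last word of a page).
                   */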
10325                 if ((page_off == 0) || (i == 0))
10326                         nvram_cmd |= NVRAM_CMD_FIRST;
10327                 if (page_off == (tp->nvram_pagesize - 4))
10328                         nvram_cmd |= NVRAM_CMD_LAST;
10329
10330                 if (i == (len - 4))
10331                         nvram_cmd |= NVRAM_CMD_LAST;
10332
10333                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
10334                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10335                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10336                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10337                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
10338                     (tp->nvram_jedecnum == JEDEC_ST) &&
10339                     (nvram_cmd & NVRAM_CMD_FIRST)) {
10340
10341                         if ((ret = tg3_nvram_exec_cmd(tp,
10342                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10343                                 NVRAM_CMD_DONE)))
10344
10345                                 break;
10346                 }
10347                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10348                         /* We always do complete word writes to eeprom. */
10349                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10350                 }
10351
10352                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10353                         break;
10354         }
10355         return ret;
10356 }
10357
10358 /* offset and length are dword aligned */
10359 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10360 {
10361         int ret;
10362
10363         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10364                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10365                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10366                 udelay(40);
10367         }
10368
10369         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10370                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10371         }
10372         else {
10373                 u32 grc_mode;
10374
10375                 ret = tg3_nvram_lock(tp);
10376                 if (ret)
10377                         return ret;
10378
10379                 tg3_enable_nvram_access(tp);
10380                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10381                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10382                         tw32(NVRAM_WRITE1, 0x406);
10383
10384                 grc_mode = tr32(GRC_MODE);
10385                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10386
10387                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10388                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10389
10390                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10391                                 buf);
10392                 }
10393                 else {
10394                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10395                                 buf);
10396                 }
10397
10398                 grc_mode = tr32(GRC_MODE);
10399                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10400
10401                 tg3_disable_nvram_access(tp);
10402                 tg3_nvram_unlock(tp);
10403         }
10404
10405         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10406                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10407                 udelay(40);
10408         }
10409
10410         return ret;
10411 }
10412
10413 struct subsys_tbl_ent {
10414         u16 subsys_vendor, subsys_devid;
10415         u32 phy_id;
10416 };
10417
10418 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10419         /* Broadcom boards. */
10420         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10421         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10422         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10423         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
10424         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10425         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10426         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
10427         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10428         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10429         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10430         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10431
10432         /* 3com boards. */
10433         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10434         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10435         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
10436         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10437         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10438
10439         /* DELL boards. */
10440         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10441         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10442         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10443         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10444
10445         /* Compaq boards. */
10446         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10447         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10448         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
10449         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10450         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10451
10452         /* IBM boards. */
10453         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10454 };
10455
10456 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10457 {
10458         int i;
10459
10460         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10461                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10462                      tp->pdev->subsystem_vendor) &&
10463                     (subsys_id_to_phy_id[i].subsys_devid ==
10464                      tp->pdev->subsystem_device))
10465                         return &subsys_id_to_phy_id[i];
10466         }
10467         return NULL;
10468 }
10469
10470 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10471 {
10472         u32 val;
10473         u16 pmcsr;
10474
10475         /* On some early chips the SRAM cannot be accessed in D3hot state,
10476          * so we need to make sure we're in D0.
10477          */
10478         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10479         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10480         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10481         msleep(1);
10482
10483         /* Make sure register accesses (indirect or otherwise)
10484          * will function correctly.
10485          */
10486         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10487                                tp->misc_host_ctrl);
10488
10489         /* The memory arbiter has to be enabled in order for SRAM accesses
10490          * to succeed.  Normally on powerup the tg3 chip firmware will make
10491          * sure it is enabled, but other entities such as system netboot
10492          * code might disable it.
10493          */
10494         val = tr32(MEMARB_MODE);
10495         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10496
10497         tp->phy_id = PHY_ID_INVALID;
10498         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10499
10500         /* Assume an onboard device and WOL capable by default.  */
10501         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10502
10503         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10504                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10505                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10506                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10507                 }
10508                 val = tr32(VCPU_CFGSHDW);
10509                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10510                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10511                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10512                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10513                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10514                 return;
10515         }
10516
10517         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10518         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10519                 u32 nic_cfg, led_cfg;
10520                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10521                 int eeprom_phy_serdes = 0;
10522
10523                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10524                 tp->nic_sram_data_cfg = nic_cfg;
10525
10526                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10527                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10528                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10529                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10530                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10531                     (ver > 0) && (ver < 0x100))
10532                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10533
10534                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10535                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10536                         eeprom_phy_serdes = 1;
10537
10538                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10539                 if (nic_phy_id != 0) {
10540                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10541                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10542
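                        /* Assemble the 32-bit PHY ID in the same layout that
                         * tg3_phy_probe() builds from MII_PHYSID1/MII_PHYSID2.
                         */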
10543                         eeprom_phy_id  = (id1 >> 16) << 10;
10544                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10545                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10546                 } else
10547                         eeprom_phy_id = 0;
10548
10549                 tp->phy_id = eeprom_phy_id;
10550                 if (eeprom_phy_serdes) {
10551                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10552                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10553                         else
10554                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10555                 }
10556
10557                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10558                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10559                                     SHASTA_EXT_LED_MODE_MASK);
10560                 else
10561                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10562
10563                 switch (led_cfg) {
10564                 default:
10565                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10566                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10567                         break;
10568
10569                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10570                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10571                         break;
10572
10573                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10574                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10575
10576                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10577                          * read from some older 5700/5701 bootcode.
10578                          */
10579                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10580                             ASIC_REV_5700 ||
10581                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10582                             ASIC_REV_5701)
10583                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10584
10585                         break;
10586
10587                 case SHASTA_EXT_LED_SHARED:
10588                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10589                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10590                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10591                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10592                                                  LED_CTRL_MODE_PHY_2);
10593                         break;
10594
10595                 case SHASTA_EXT_LED_MAC:
10596                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10597                         break;
10598
10599                 case SHASTA_EXT_LED_COMBO:
10600                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10601                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10602                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10603                                                  LED_CTRL_MODE_PHY_2);
10604                         break;
10605
10606                 }
10607
10608                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10609                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10610                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10611                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10612
10613                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0)
10614                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10615
10616                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10617                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10618                         if ((tp->pdev->subsystem_vendor ==
10619                              PCI_VENDOR_ID_ARIMA) &&
10620                             (tp->pdev->subsystem_device == 0x205a ||
10621                              tp->pdev->subsystem_device == 0x2063))
10622                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10623                 } else {
10624                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10625                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10626                 }
10627
10628                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10629                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10630                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10631                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10632                 }
10633                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10634                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10635                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10636                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10637                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10638
10639                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10640                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10641                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10642
10643                 if (cfg2 & (1 << 17))
10644                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10645
10646                 /* serdes signal pre-emphasis in register 0x590 set by
10647                  * bootcode if bit 18 is set */
10648                 if (cfg2 & (1 << 18))
10649                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10650
10651                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10652                         u32 cfg3;
10653
10654                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10655                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10656                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10657                 }
10658         }
10659 }
10660
10661 static int __devinit tg3_phy_probe(struct tg3 *tp)
10662 {
10663         u32 hw_phy_id_1, hw_phy_id_2;
10664         u32 hw_phy_id, hw_phy_id_masked;
10665         int err;
10666
10667         /* Reading the PHY ID register can conflict with ASF
10668          * firmware access to the PHY hardware.
10669          */
10670         err = 0;
10671         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10672             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
10673                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10674         } else {
10675                 /* Now read the physical PHY_ID from the chip and verify
10676                  * that it is sane.  If it doesn't look good, we fall back
10677                  * to either the hard-coded table based PHY_ID or, failing
10678                  * that, the value found in the eeprom area.
10679                  */
10680                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10681                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10682
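                /* Fold the two MII ID registers into the driver's internal
                 * 32-bit PHY ID representation.
                 */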
10683                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
10684                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10685                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
10686
10687                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10688         }
10689
10690         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10691                 tp->phy_id = hw_phy_id;
10692                 if (hw_phy_id_masked == PHY_ID_BCM8002)
10693                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10694                 else
10695                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10696         } else {
10697                 if (tp->phy_id != PHY_ID_INVALID) {
10698                         /* Do nothing, phy ID already set up in
10699                          * tg3_get_eeprom_hw_cfg().
10700                          */
10701                 } else {
10702                         struct subsys_tbl_ent *p;
10703
10704                         /* No eeprom signature?  Try the hardcoded
10705                          * subsys device table.
10706                          */
10707                         p = lookup_by_subsys(tp);
10708                         if (!p)
10709                                 return -ENODEV;
10710
10711                         tp->phy_id = p->phy_id;
10712                         if (!tp->phy_id ||
10713                             tp->phy_id == PHY_ID_BCM8002)
10714                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10715                 }
10716         }
10717
10718         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10719             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
10720             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10721                 u32 bmsr, adv_reg, tg3_ctrl, mask;
10722
10723                 tg3_readphy(tp, MII_BMSR, &bmsr);
10724                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10725                     (bmsr & BMSR_LSTATUS))
10726                         goto skip_phy_reset;
10727
10728                 err = tg3_phy_reset(tp);
10729                 if (err)
10730                         return err;
10731
10732                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10733                            ADVERTISE_100HALF | ADVERTISE_100FULL |
10734                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10735                 tg3_ctrl = 0;
10736                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10737                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10738                                     MII_TG3_CTRL_ADV_1000_FULL);
10739                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10740                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10741                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10742                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
10743                 }
10744
10745                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10746                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10747                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
10748                 if (!tg3_copper_is_advertising_all(tp, mask)) {
10749                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10750
10751                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10752                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10753
10754                         tg3_writephy(tp, MII_BMCR,
10755                                      BMCR_ANENABLE | BMCR_ANRESTART);
10756                 }
10757                 tg3_phy_set_wirespeed(tp);
10758
10759                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10760                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10761                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10762         }
10763
10764 skip_phy_reset:
10765         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10766                 err = tg3_init_5401phy_dsp(tp);
10767                 if (err)
10768                         return err;
10769         }
10770
10771         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10772                 err = tg3_init_5401phy_dsp(tp);
10773         }
10774
10775         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10776                 tp->link_config.advertising =
10777                         (ADVERTISED_1000baseT_Half |
10778                          ADVERTISED_1000baseT_Full |
10779                          ADVERTISED_Autoneg |
10780                          ADVERTISED_FIBRE);
10781         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10782                 tp->link_config.advertising &=
10783                         ~(ADVERTISED_1000baseT_Half |
10784                           ADVERTISED_1000baseT_Full);
10785
10786         return err;
10787 }
10788
10789 static void __devinit tg3_read_partno(struct tg3 *tp)
10790 {
10791         unsigned char vpd_data[256];
10792         unsigned int i;
10793         u32 magic;
10794
10795         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10796                 goto out_not_found;
10797
10798         if (magic == TG3_EEPROM_MAGIC) {
10799                 for (i = 0; i < 256; i += 4) {
10800                         u32 tmp;
10801
10802                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10803                                 goto out_not_found;
10804
10805                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10806                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10807                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10808                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10809                 }
10810         } else {
10811                 int vpd_cap;
10812
10813                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10814                 for (i = 0; i < 256; i += 4) {
10815                         u32 tmp, j = 0;
10816                         u16 tmp16;
10817
10818                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10819                                               i);
10820                         while (j++ < 100) {
10821                                 pci_read_config_word(tp->pdev, vpd_cap +
10822                                                      PCI_VPD_ADDR, &tmp16);
10823                                 if (tmp16 & 0x8000)
10824                                         break;
10825                                 msleep(1);
10826                         }
10827                         if (!(tmp16 & 0x8000))
10828                                 goto out_not_found;
10829
10830                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10831                                               &tmp);
10832                         tmp = cpu_to_le32(tmp);
10833                         memcpy(&vpd_data[i], &tmp, 4);
10834                 }
10835         }
10836
10837         /* Now parse and find the part number. */
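        /* The image is a series of large-resource VPD blocks: tag 0x82 is the
         * identifier string and 0x91 the read/write area, both skipped here;
         * tag 0x90 is the read-only area that carries the "PN" keyword.
         */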
10838         for (i = 0; i < 254; ) {
10839                 unsigned char val = vpd_data[i];
10840                 unsigned int block_end;
10841
10842                 if (val == 0x82 || val == 0x91) {
10843                         i = (i + 3 +
10844                              (vpd_data[i + 1] +
10845                               (vpd_data[i + 2] << 8)));
10846                         continue;
10847                 }
10848
10849                 if (val != 0x90)
10850                         goto out_not_found;
10851
10852                 block_end = (i + 3 +
10853                              (vpd_data[i + 1] +
10854                               (vpd_data[i + 2] << 8)));
10855                 i += 3;
10856
10857                 if (block_end > 256)
10858                         goto out_not_found;
10859
10860                 while (i < (block_end - 2)) {
10861                         if (vpd_data[i + 0] == 'P' &&
10862                             vpd_data[i + 1] == 'N') {
10863                                 int partno_len = vpd_data[i + 2];
10864
10865                                 i += 3;
10866                                 if (partno_len > 24 || (partno_len + i) > 256)
10867                                         goto out_not_found;
10868
10869                                 memcpy(tp->board_part_number,
10870                                        &vpd_data[i], partno_len);
10871
10872                                 /* Success. */
10873                                 return;
10874                         }
10875                         i += 3 + vpd_data[i + 2];
10876                 }
10877
10878                 /* Part number not found. */
10879                 goto out_not_found;
10880         }
10881
10882 out_not_found:
10883         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10884                 strcpy(tp->board_part_number, "BCM95906");
10885         else
10886                 strcpy(tp->board_part_number, "none");
10887 }
10888
10889 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10890 {
10891         u32 val;
10892
10893         if (tg3_nvram_read_swab(tp, offset, &val) ||
10894             (val & 0xfc000000) != 0x0c000000 ||
10895             tg3_nvram_read_swab(tp, offset + 4, &val) ||
10896             val != 0)
10897                 return 0;
10898
10899         return 1;
10900 }
10901
10902 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10903 {
10904         u32 val, offset, start;
10905         u32 ver_offset;
10906         int i, bcnt;
10907
10908         if (tg3_nvram_read_swab(tp, 0, &val))
10909                 return;
10910
10911         if (val != TG3_EEPROM_MAGIC)
10912                 return;
10913
10914         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10915             tg3_nvram_read_swab(tp, 0x4, &start))
10916                 return;
10917
10918         offset = tg3_nvram_logical_addr(tp, offset);
10919
10920         if (!tg3_fw_img_is_valid(tp, offset) ||
10921             tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10922                 return;
10923
10924         offset = offset + ver_offset - start;
10925         for (i = 0; i < 16; i += 4) {
10926                 if (tg3_nvram_read(tp, offset + i, &val))
10927                         return;
10928
10929                 val = le32_to_cpu(val);
10930                 memcpy(tp->fw_ver + i, &val, 4);
10931         }
10932
10933         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10934              (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
10935                 return;
10936
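        /* Scan the NVRAM directory for the ASF init entry; its image holds
         * the management firmware version that gets appended to fw_ver.
         */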
10937         for (offset = TG3_NVM_DIR_START;
10938              offset < TG3_NVM_DIR_END;
10939              offset += TG3_NVM_DIRENT_SIZE) {
10940                 if (tg3_nvram_read_swab(tp, offset, &val))
10941                         return;
10942
10943                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
10944                         break;
10945         }
10946
10947         if (offset == TG3_NVM_DIR_END)
10948                 return;
10949
10950         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10951                 start = 0x08000000;
10952         else if (tg3_nvram_read_swab(tp, offset - 4, &start))
10953                 return;
10954
10955         if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
10956             !tg3_fw_img_is_valid(tp, offset) ||
10957             tg3_nvram_read_swab(tp, offset + 8, &val))
10958                 return;
10959
10960         offset += val - start;
10961
10962         bcnt = strlen(tp->fw_ver);
10963
10964         tp->fw_ver[bcnt++] = ',';
10965         tp->fw_ver[bcnt++] = ' ';
10966
10967         for (i = 0; i < 4; i++) {
10968                 if (tg3_nvram_read(tp, offset, &val))
10969                         return;
10970
10971                 val = le32_to_cpu(val);
10972                 offset += sizeof(val);
10973
10974                 if (bcnt > TG3_VER_SIZE - sizeof(val)) {
10975                         memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
10976                         break;
10977                 }
10978
10979                 memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
10980                 bcnt += sizeof(val);
10981         }
10982
10983         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
10984 }
10985
10986 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10987
10988 static int __devinit tg3_get_invariants(struct tg3 *tp)
10989 {
10990         static struct pci_device_id write_reorder_chipsets[] = {
10991                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10992                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10993                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10994                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10995                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10996                              PCI_DEVICE_ID_VIA_8385_0) },
10997                 { },
10998         };
10999         u32 misc_ctrl_reg;
11000         u32 cacheline_sz_reg;
11001         u32 pci_state_reg, grc_misc_cfg;
11002         u32 val;
11003         u16 pci_cmd;
11004         int err, pcie_cap;
11005
11006         /* Force memory write invalidate off.  If we leave it on,
11007          * then on 5700_BX chips we have to enable a workaround.
11008          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11009          * to match the cacheline size.  The Broadcom driver has this
11010          * workaround but turns MWI off all the time, so it never uses
11011          * it.  This seems to suggest that the workaround is insufficient.
11012          */
11013         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11014         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11015         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11016
11017         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11018          * has the register indirect write enable bit set before
11019          * we try to access any of the MMIO registers.  It is also
11020          * critical that the PCI-X hw workaround situation is decided
11021          * before that.
11022          */
11023         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11024                               &misc_ctrl_reg);
11025
11026         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11027                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11028         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11029                 u32 prod_id_asic_rev;
11030
11031                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11032                                       &prod_id_asic_rev);
11033                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11034         }
11035
11036         /* Wrong chip ID in 5752 A0. This code can be removed later
11037          * as A0 is not in production.
11038          */
11039         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11040                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11041
11042         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11043          * we need to disable memory and use config. cycles
11044          * only to access all registers. The 5702/03 chips
11045          * can mistakenly decode the special cycles from the
11046          * ICH chipsets as memory write cycles, causing corruption
11047          * of register and memory space. Only certain ICH bridges
11048          * will drive special cycles with non-zero data during the
11049          * address phase which can fall within the 5703's address
11050          * range. This is not an ICH bug as the PCI spec allows
11051          * non-zero address during special cycles. However, only
11052          * these ICH bridges are known to drive non-zero addresses
11053          * during special cycles.
11054          *
11055          * Since special cycles do not cross PCI bridges, we only
11056          * enable this workaround if the 5703 is on the secondary
11057          * bus of these ICH bridges.
11058          */
11059         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11060             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11061                 static struct tg3_dev_id {
11062                         u32     vendor;
11063                         u32     device;
11064                         u32     rev;
11065                 } ich_chipsets[] = {
11066                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11067                           PCI_ANY_ID },
11068                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11069                           PCI_ANY_ID },
11070                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11071                           0xa },
11072                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11073                           PCI_ANY_ID },
11074                         { },
11075                 };
11076                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11077                 struct pci_dev *bridge = NULL;
11078
11079                 while (pci_id->vendor != 0) {
11080                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11081                                                 bridge);
11082                         if (!bridge) {
11083                                 pci_id++;
11084                                 continue;
11085                         }
11086                         if (pci_id->rev != PCI_ANY_ID) {
11087                                 if (bridge->revision > pci_id->rev)
11088                                         continue;
11089                         }
11090                         if (bridge->subordinate &&
11091                             (bridge->subordinate->number ==
11092                              tp->pdev->bus->number)) {
11093
11094                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11095                                 pci_dev_put(bridge);
11096                                 break;
11097                         }
11098                 }
11099         }
11100
11101         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11102          * DMA addresses > 40-bit. This bridge may have other additional
11103          * 57xx devices behind it in some 4-port NIC designs for example.
11104          * Any tg3 device found behind the bridge will also need the 40-bit
11105          * DMA workaround.
11106          */
11107         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11108             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11109                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11110                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11111                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11112         }
11113         else {
11114                 struct pci_dev *bridge = NULL;
11115
11116                 do {
11117                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11118                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11119                                                 bridge);
11120                         if (bridge && bridge->subordinate &&
11121                             (bridge->subordinate->number <=
11122                              tp->pdev->bus->number) &&
11123                             (bridge->subordinate->subordinate >=
11124                              tp->pdev->bus->number)) {
11125                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11126                                 pci_dev_put(bridge);
11127                                 break;
11128                         }
11129                 } while (bridge);
11130         }
11131
11132         /* Initialize misc host control in PCI block. */
11133         tp->misc_host_ctrl |= (misc_ctrl_reg &
11134                                MISC_HOST_CTRL_CHIPREV);
11135         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11136                                tp->misc_host_ctrl);
11137
11138         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11139                               &cacheline_sz_reg);
11140
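        /* TG3PCI_CACHELINESZ mirrors the standard PCI config dword at 0x0c:
         * cacheline size, latency timer, header type and BIST, one byte each.
         */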
11141         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11142         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11143         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11144         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11145
11146         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11147             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11148                 tp->pdev_peer = tg3_find_peer(tp);
11149
11150         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11151             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11152             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11153             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11154             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11155             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11156             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11157             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11158                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11159
11160         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11161             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11162                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11163
11164         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11165                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11166                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11167                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11168                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11169                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11170                      tp->pdev_peer == tp->pdev))
11171                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11172
11173                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11174                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11175                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11176                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11177                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11178                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11179                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11180                 } else {
11181                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11182                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11183                                 ASIC_REV_5750 &&
11184                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11185                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11186                 }
11187         }
11188
11189         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11190             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11191             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11192             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11193             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11194             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11195             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11196             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11197                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11198
11199         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11200         if (pcie_cap != 0) {
11201                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11202                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11203                         u16 lnkctl;
11204
11205                         pci_read_config_word(tp->pdev,
11206                                              pcie_cap + PCI_EXP_LNKCTL,
11207                                              &lnkctl);
11208                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11209                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11210                 }
11211         }
11212
11213         /* If we have an AMD 762 or VIA K8T800 chipset, write
11214          * reordering to the mailbox registers done by the host
11215          * controller can cause major troubles.  We read back from
11216          * every mailbox register write to force the writes to be
11217          * posted to the chip in order.
11218          */
11219         if (pci_dev_present(write_reorder_chipsets) &&
11220             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11221                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11222
11223         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11224             tp->pci_lat_timer < 64) {
11225                 tp->pci_lat_timer = 64;
11226
11227                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11228                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11229                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11230                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11231
11232                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11233                                        cacheline_sz_reg);
11234         }
11235
11236         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11237             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11238                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11239                 if (!tp->pcix_cap) {
11240                         printk(KERN_ERR PFX "Cannot find PCI-X "
11241                                             "capability, aborting.\n");
11242                         return -EIO;
11243                 }
11244         }
11245
11246         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11247                               &pci_state_reg);
11248
11249         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11250                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11251
11252                 /* If this is a 5700 BX chipset, and we are in PCI-X
11253                  * mode, enable register write workaround.
11254                  *
11255                  * The workaround is to use indirect register accesses
11256                  * for all chip writes not to mailbox registers.
11257                  */
11258                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11259                         u32 pm_reg;
11260
11261                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11262
11263                         /* The chip can have its power management PCI config
11264                          * space registers clobbered due to this bug.
11265                          * So explicitly force the chip into D0 here.
11266                          */
11267                         pci_read_config_dword(tp->pdev,
11268                                               tp->pm_cap + PCI_PM_CTRL,
11269                                               &pm_reg);
11270                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11271                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11272                         pci_write_config_dword(tp->pdev,
11273                                                tp->pm_cap + PCI_PM_CTRL,
11274                                                pm_reg);
11275
11276                         /* Also, force SERR#/PERR# in PCI command. */
11277                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11278                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11279                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11280                 }
11281         }
11282
11283         /* 5700 BX chips need to have their TX producer index mailboxes
11284          * written twice to work around a bug.
11285          */
11286         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11287                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11288
11289         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11290                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11291         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11292                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11293
11294         /* Chip-specific fixup from Broadcom driver */
11295         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11296             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11297                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11298                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11299         }
11300
11301         /* Default fast path register access methods */
11302         tp->read32 = tg3_read32;
11303         tp->write32 = tg3_write32;
11304         tp->read32_mbox = tg3_read32;
11305         tp->write32_mbox = tg3_write32;
11306         tp->write32_tx_mbox = tg3_write32;
11307         tp->write32_rx_mbox = tg3_write32;
11308
11309         /* Various workaround register access methods */
11310         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11311                 tp->write32 = tg3_write_indirect_reg32;
11312         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11313                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11314                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11315                 /*
11316                  * Back to back register writes can cause problems on these
11317                  * chips; the workaround is to read back all reg writes
11318                  * except those to mailbox regs.
11319                  *
11320                  * See tg3_write_indirect_reg32().
11321                  */
11322                 tp->write32 = tg3_write_flush_reg32;
11323         }
11324
11325
11326         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11327             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11328                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11329                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11330                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11331         }
11332
11333         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11334                 tp->read32 = tg3_read_indirect_reg32;
11335                 tp->write32 = tg3_write_indirect_reg32;
11336                 tp->read32_mbox = tg3_read_indirect_mbox;
11337                 tp->write32_mbox = tg3_write_indirect_mbox;
11338                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11339                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11340
11341                 iounmap(tp->regs);
11342                 tp->regs = NULL;
11343
11344                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11345                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11346                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11347         }
11348         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11349                 tp->read32_mbox = tg3_read32_mbox_5906;
11350                 tp->write32_mbox = tg3_write32_mbox_5906;
11351                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11352                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11353         }
11354
11355         if (tp->write32 == tg3_write_indirect_reg32 ||
11356             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11357              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11358               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11359                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11360
11361         /* Get eeprom hw config before calling tg3_set_power_state().
11362          * In particular, the TG3_FLG2_IS_NIC flag must be
11363          * determined before calling tg3_set_power_state() so that
11364          * we know whether or not to switch out of Vaux power.
11365          * When the flag is set, it means that GPIO1 is used for eeprom
11366          * write protect and also implies that it is a LOM where GPIOs
11367          * are not used to switch power.
11368          */
11369         tg3_get_eeprom_hw_cfg(tp);
11370
11371         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11372                 /* Allow reads and writes to the
11373                  * APE register and memory space.
11374                  */
11375                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11376                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11377                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11378                                        pci_state_reg);
11379         }
11380
11381         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11382             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11383                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11384
11385         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11386          * GPIO1 driven high will bring 5700's external PHY out of reset.
11387          * It is also used as eeprom write protect on LOMs.
11388          */
11389         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11390         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11391             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11392                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11393                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11394         /* Unused GPIO3 must be driven as output on 5752 because there
11395          * are no pull-up resistors on unused GPIO pins.
11396          */
11397         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11398                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11399
11400         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11401                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11402
11403         /* Force the chip into D0. */
11404         err = tg3_set_power_state(tp, PCI_D0);
11405         if (err) {
11406                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11407                        pci_name(tp->pdev));
11408                 return err;
11409         }
11410
11411         /* 5700 B0 chips do not support checksumming correctly due
11412          * to hardware bugs.
11413          */
11414         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11415                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11416
11417         /* Derive initial jumbo mode from MTU assigned in
11418          * ether_setup() via the alloc_etherdev() call
11419          */
11420         if (tp->dev->mtu > ETH_DATA_LEN &&
11421             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11422                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11423
11424         /* Determine WakeOnLan speed to use. */
11425         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11426             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11427             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11428             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11429                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11430         } else {
11431                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11432         }
11433
11434         /* A few boards don't want Ethernet@WireSpeed phy feature */
11435         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11436             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11437              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11438              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11439             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11440             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11441                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11442
11443         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11444             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11445                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11446         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11447                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11448
11449         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11450                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11451                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11452                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11453                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11454                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11455                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11456                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11457                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11458                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11459                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11460                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11461         }
11462
11463         tp->coalesce_mode = 0;
11464         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11465             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11466                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11467
11468         /* Initialize MAC MI mode, polling disabled. */
11469         tw32_f(MAC_MI_MODE, tp->mi_mode);
11470         udelay(80);
11471
11472         /* Initialize data/descriptor byte/word swapping. */
11473         val = tr32(GRC_MODE);
11474         val &= GRC_MODE_HOST_STACKUP;
11475         tw32(GRC_MODE, val | tp->grc_mode);
11476
11477         tg3_switch_clocks(tp);
11478
11479         /* Clear this out for sanity. */
11480         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11481
11482         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11483                               &pci_state_reg);
11484         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11485             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11486                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11487
11488                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11489                     chiprevid == CHIPREV_ID_5701_B0 ||
11490                     chiprevid == CHIPREV_ID_5701_B2 ||
11491                     chiprevid == CHIPREV_ID_5701_B5) {
11492                         void __iomem *sram_base;
11493
11494                         /* Write some dummy words into the SRAM status block
11495                          * area and see if it reads back correctly.  If the return
11496                          * value is bad, force enable the PCIX workaround.
11497                          */
11498                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11499
11500                         writel(0x00000000, sram_base);
11501                         writel(0x00000000, sram_base + 4);
11502                         writel(0xffffffff, sram_base + 4);
11503                         if (readl(sram_base) != 0x00000000)
11504                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11505                 }
11506         }
11507
11508         udelay(50);
11509         tg3_nvram_init(tp);
11510
11511         grc_misc_cfg = tr32(GRC_MISC_CFG);
11512         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11513
11514         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11515             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11516              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11517                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11518
11519         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11520             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11521                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11522         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11523                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11524                                       HOSTCC_MODE_CLRTICK_TXBD);
11525
11526                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11527                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11528                                        tp->misc_host_ctrl);
11529         }
11530
11531         /* these are limited to 10/100 only */
11532         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11533              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11534             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11535              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11536              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11537               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11538               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11539             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11540              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11541               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11542               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11543             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11544                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11545
11546         err = tg3_phy_probe(tp);
11547         if (err) {
11548                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11549                        pci_name(tp->pdev), err);
11550                 /* ... but do not return immediately ... */
11551         }
11552
11553         tg3_read_partno(tp);
11554         tg3_read_fw_ver(tp);
11555
11556         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11557                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11558         } else {
11559                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11560                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11561                 else
11562                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11563         }
11564
11565         /* 5700 {AX,BX} chips have a broken status block link
11566          * change bit implementation, so we must use the
11567          * status register in those cases.
11568          */
11569         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11570                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11571         else
11572                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11573
11574         /* The led_ctrl is set during tg3_phy_probe; here we might
11575          * have to force the link status polling mechanism based
11576          * upon subsystem IDs.
11577          */
11578         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11579             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11580             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11581                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11582                                   TG3_FLAG_USE_LINKCHG_REG);
11583         }
11584
11585         /* For all SERDES we poll the MAC status register. */
11586         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11587                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11588         else
11589                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11590
11591         /* All chips before 5787 can get confused if TX buffers
11592          * straddle the 4GB address boundary in some cases.
11593          */
11594         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11595             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11596             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11597             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11598             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11599                 tp->dev->hard_start_xmit = tg3_start_xmit;
11600         else
11601                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11602
11603         tp->rx_offset = 2;
11604         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11605             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11606                 tp->rx_offset = 0;
11607
11608         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11609
11610         /* Increment the rx prod index on the rx std ring by at most
11611          * 8 for these chips to work around hw errata.
11612          */
11613         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11614             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11615             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11616                 tp->rx_std_max_post = 8;
11617
11618         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11619                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11620                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11621
11622         return err;
11623 }
11624
11625 #ifdef CONFIG_SPARC
11626 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11627 {
11628         struct net_device *dev = tp->dev;
11629         struct pci_dev *pdev = tp->pdev;
11630         struct device_node *dp = pci_device_to_OF_node(pdev);
11631         const unsigned char *addr;
11632         int len;
11633
11634         addr = of_get_property(dp, "local-mac-address", &len);
11635         if (addr && len == 6) {
11636                 memcpy(dev->dev_addr, addr, 6);
11637                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11638                 return 0;
11639         }
11640         return -ENODEV;
11641 }
11642
11643 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11644 {
11645         struct net_device *dev = tp->dev;
11646
11647         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11648         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11649         return 0;
11650 }
11651 #endif
11652
11653 static int __devinit tg3_get_device_address(struct tg3 *tp)
11654 {
11655         struct net_device *dev = tp->dev;
11656         u32 hi, lo, mac_offset;
11657         int addr_ok = 0;
11658
11659 #ifdef CONFIG_SPARC
11660         if (!tg3_get_macaddr_sparc(tp))
11661                 return 0;
11662 #endif
11663
11664         mac_offset = 0x7c;
11665         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11666             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11667                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11668                         mac_offset = 0xcc;
11669                 if (tg3_nvram_lock(tp))
11670                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11671                 else
11672                         tg3_nvram_unlock(tp);
11673         }
11674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11675                 mac_offset = 0x10;
11676
11677         /* First try to get it from MAC address mailbox. */
11678         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
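        /* 0x484b is ASCII 'H','K', the signature the bootcode leaves in
         * the mailbox when it has stored a valid MAC address there.
         */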
11679         if ((hi >> 16) == 0x484b) {
11680                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11681                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11682
11683                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11684                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11685                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11686                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11687                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11688
11689                 /* Some old bootcode may report a 0 MAC address in SRAM */
11690                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11691         }
11692         if (!addr_ok) {
11693                 /* Next, try NVRAM. */
11694                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11695                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11696                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11697                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11698                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11699                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11700                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11701                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11702                 }
11703                 /* Finally just fetch it out of the MAC control regs. */
11704                 else {
11705                         hi = tr32(MAC_ADDR_0_HIGH);
11706                         lo = tr32(MAC_ADDR_0_LOW);
11707
11708                         dev->dev_addr[5] = lo & 0xff;
11709                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11710                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11711                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11712                         dev->dev_addr[1] = hi & 0xff;
11713                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11714                 }
11715         }
11716
11717         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11718 #ifdef CONFIG_SPARC
11719                 if (!tg3_get_default_macaddr_sparc(tp))
11720                         return 0;
11721 #endif
11722                 return -EINVAL;
11723         }
11724         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11725         return 0;
11726 }
11727
11728 #define BOUNDARY_SINGLE_CACHELINE       1
11729 #define BOUNDARY_MULTI_CACHELINE        2
11730
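/* Pick DMA read/write boundary bits for DMA_RW_CTRL based on the host
 * cache line size and architecture.  On everything except 5700/5701 and
 * PCI Express devices the boundary bits are ignored, so 'val' is
 * returned unchanged there.
 */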
11731 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11732 {
11733         int cacheline_size;
11734         u8 byte;
11735         int goal;
11736
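        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; a value of
         * zero usually means it was never programmed, so fall back to
         * the largest (1024 byte) boundary below.
         */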
11737         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11738         if (byte == 0)
11739                 cacheline_size = 1024;
11740         else
11741                 cacheline_size = (int) byte * 4;
11742
11743         /* On 5703 and later chips, the boundary bits have no
11744          * effect.
11745          */
11746         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11747             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11748             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11749                 goto out;
11750
11751 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11752         goal = BOUNDARY_MULTI_CACHELINE;
11753 #else
11754 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11755         goal = BOUNDARY_SINGLE_CACHELINE;
11756 #else
11757         goal = 0;
11758 #endif
11759 #endif
11760
11761         if (!goal)
11762                 goto out;
11763
11764         /* PCI controllers on most RISC systems tend to disconnect
11765          * when a device tries to burst across a cache-line boundary.
11766          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11767          *
11768          * Unfortunately, for PCI-E there are only limited
11769          * write-side controls for this, and thus for reads
11770          * we will still get the disconnects.  We'll also waste
11771          * these PCI cycles for both read and write for chips
11772          * other than 5700 and 5701, which do not implement the
11773          * boundary bits.
11774          */
11775         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11776             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11777                 switch (cacheline_size) {
11778                 case 16:
11779                 case 32:
11780                 case 64:
11781                 case 128:
11782                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11783                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11784                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11785                         } else {
11786                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11787                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11788                         }
11789                         break;
11790
11791                 case 256:
11792                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11793                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11794                         break;
11795
11796                 default:
11797                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11798                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11799                         break;
11800                 }
11801         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11802                 switch (cacheline_size) {
11803                 case 16:
11804                 case 32:
11805                 case 64:
11806                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11807                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11808                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11809                                 break;
11810                         }
11811                         /* fallthrough */
11812                 case 128:
11813                 default:
11814                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11815                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11816                         break;
11817                 }
11818         } else {
11819                 switch (cacheline_size) {
11820                 case 16:
11821                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11822                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11823                                         DMA_RWCTRL_WRITE_BNDRY_16);
11824                                 break;
11825                         }
11826                         /* fallthrough */
11827                 case 32:
11828                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11829                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11830                                         DMA_RWCTRL_WRITE_BNDRY_32);
11831                                 break;
11832                         }
11833                         /* fallthrough */
11834                 case 64:
11835                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11836                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11837                                         DMA_RWCTRL_WRITE_BNDRY_64);
11838                                 break;
11839                         }
11840                         /* fallthrough */
11841                 case 128:
11842                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11843                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11844                                         DMA_RWCTRL_WRITE_BNDRY_128);
11845                                 break;
11846                         }
11847                         /* fallthrough */
11848                 case 256:
11849                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11850                                 DMA_RWCTRL_WRITE_BNDRY_256);
11851                         break;
11852                 case 512:
11853                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11854                                 DMA_RWCTRL_WRITE_BNDRY_512);
11855                         break;
11856                 case 1024:
11857                 default:
11858                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11859                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11860                         break;
11861                 }
11862         }
11863
11864 out:
11865         return val;
11866 }
11867
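/* Perform a single host <-> device DMA of 'size' bytes: build one
 * internal buffer descriptor in NIC SRAM via the PCI memory window,
 * kick the read or write DMA engine through its FTQ, then poll the
 * matching completion FIFO for up to ~4ms.  Returns 0 on completion,
 * -ENODEV on timeout.
 */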
11868 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
11869 {
11870         struct tg3_internal_buffer_desc test_desc;
11871         u32 sram_dma_descs;
11872         int i, ret;
11873
11874         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
11875
11876         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
11877         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
11878         tw32(RDMAC_STATUS, 0);
11879         tw32(WDMAC_STATUS, 0);
11880
11881         tw32(BUFMGR_MODE, 0);
11882         tw32(FTQ_RESET, 0);
11883
11884         test_desc.addr_hi = ((u64) buf_dma) >> 32;
11885         test_desc.addr_lo = buf_dma & 0xffffffff;
11886         test_desc.nic_mbuf = 0x00002100;
11887         test_desc.len = size;
11888
11889         /*
11890          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
11891          * the *second* time the tg3 driver was getting loaded after an
11892          * initial scan.
11893          *
11894          * Broadcom tells me:
11895          *   ...the DMA engine is connected to the GRC block and a DMA
11896          *   reset may affect the GRC block in some unpredictable way...
11897          *   The behavior of resets to individual blocks has not been tested.
11898          *
11899          * Broadcom noted the GRC reset will also reset all sub-components.
11900          */
11901         if (to_device) {
11902                 test_desc.cqid_sqid = (13 << 8) | 2;
11903
11904                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
11905                 udelay(40);
11906         } else {
11907                 test_desc.cqid_sqid = (16 << 8) | 7;
11908
11909                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
11910                 udelay(40);
11911         }
11912         test_desc.flags = 0x00000005;
11913
11914         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
11915                 u32 val;
11916
11917                 val = *(((u32 *)&test_desc) + i);
11918                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
11919                                        sram_dma_descs + (i * sizeof(u32)));
11920                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
11921         }
11922         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
11923
11924         if (to_device) {
11925                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11926         } else {
11927                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11928         }
11929
11930         ret = -ENODEV;
11931         for (i = 0; i < 40; i++) {
11932                 u32 val;
11933
11934                 if (to_device)
11935                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11936                 else
11937                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11938                 if ((val & 0xffff) == sram_dma_descs) {
11939                         ret = 0;
11940                         break;
11941                 }
11942
11943                 udelay(100);
11944         }
11945
11946         return ret;
11947 }
11948
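/* 8KB scratch buffer used by the DMA engine self-test below. */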
11949 #define TEST_BUFFER_SIZE        0x2000
11950
11951 static int __devinit tg3_test_dma(struct tg3 *tp)
11952 {
11953         dma_addr_t buf_dma;
11954         u32 *buf, saved_dma_rwctrl;
11955         int ret;
11956
11957         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11958         if (!buf) {
11959                 ret = -ENOMEM;
11960                 goto out_nofree;
11961         }
11962
11963         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11964                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11965
11966         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11967
11968         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11969                 /* DMA read watermark not used on PCIE */
11970                 tp->dma_rwctrl |= 0x00180000;
11971         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11972                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11973                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11974                         tp->dma_rwctrl |= 0x003f0000;
11975                 else
11976                         tp->dma_rwctrl |= 0x003f000f;
11977         } else {
11978                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11979                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11980                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11981                         u32 read_water = 0x7;
11982
11983                         /* If the 5704 is behind the EPB bridge, we can
11984                          * do the less restrictive ONE_DMA workaround for
11985                          * better performance.
11986                          */
11987                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11988                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11989                                 tp->dma_rwctrl |= 0x8000;
11990                         else if (ccval == 0x6 || ccval == 0x7)
11991                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11992
11993                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
11994                                 read_water = 4;
11995                         /* Set bit 23 to enable PCIX hw bug fix */
11996                         tp->dma_rwctrl |=
11997                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
11998                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
11999                                 (1 << 23);
12000                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12001                         /* 5780 always in PCIX mode */
12002                         tp->dma_rwctrl |= 0x00144000;
12003                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12004                         /* 5714 always in PCIX mode */
12005                         tp->dma_rwctrl |= 0x00148000;
12006                 } else {
12007                         tp->dma_rwctrl |= 0x001b000f;
12008                 }
12009         }
12010
12011         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12012             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12013                 tp->dma_rwctrl &= 0xfffffff0;
12014
12015         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12016             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12017                 /* Remove this if it causes problems for some boards. */
12018                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12019
12020                 /* On 5700/5701 chips, we need to set this bit.
12021                  * Otherwise the chip will issue cacheline transactions
12022                  * to streamable DMA memory with not all the byte
12023                  * enables turned on.  This is an error on several
12024                  * RISC PCI controllers, in particular sparc64.
12025                  *
12026                  * On 5703/5704 chips, this bit has been reassigned
12027                  * a different meaning.  In particular, it is used
12028                  * on those chips to enable a PCI-X workaround.
12029                  */
12030                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12031         }
12032
12033         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12034
12035 #if 0
12036         /* Unneeded, already done by tg3_get_invariants.  */
12037         tg3_switch_clocks(tp);
12038 #endif
12039
12040         ret = 0;
12041         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12042             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12043                 goto out;
12044
12045         /* It is best to perform DMA test with maximum write burst size
12046          * to expose the 5700/5701 write DMA bug.
12047          */
12048         saved_dma_rwctrl = tp->dma_rwctrl;
12049         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12050         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12051
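        /* Fill the buffer with a known pattern, DMA it to the chip and back,
         * then verify it.  On corruption, retry once with the write boundary
         * forced down to 16 bytes; if it still fails, report -ENODEV.
         */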
12052         while (1) {
12053                 u32 *p = buf, i;
12054
12055                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12056                         p[i] = i;
12057
12058                 /* Send the buffer to the chip. */
12059                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12060                 if (ret) {
12061                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12062                         break;
12063                 }
12064
12065 #if 0
12066                 /* validate data reached card RAM correctly. */
12067                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12068                         u32 val;
12069                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
12070                         if (le32_to_cpu(val) != p[i]) {
12071                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
12072                                 /* ret = -ENODEV here? */
12073                         }
12074                         p[i] = 0;
12075                 }
12076 #endif
12077                 /* Now read it back. */
12078                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12079                 if (ret) {
12080                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
12081
12082                         break;
12083                 }
12084
12085                 /* Verify it. */
12086                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12087                         if (p[i] == i)
12088                                 continue;
12089
12090                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12091                             DMA_RWCTRL_WRITE_BNDRY_16) {
12092                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12093                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12094                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12095                                 break;
12096                         } else {
12097                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12098                                 ret = -ENODEV;
12099                                 goto out;
12100                         }
12101                 }
12102
12103                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12104                         /* Success. */
12105                         ret = 0;
12106                         break;
12107                 }
12108         }
12109         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12110             DMA_RWCTRL_WRITE_BNDRY_16) {
12111                 static struct pci_device_id dma_wait_state_chipsets[] = {
12112                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12113                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12114                         { },
12115                 };
12116
12117                 /* DMA test passed without adjusting DMA boundary,
12118                  * now look for chipsets that are known to expose the
12119                  * DMA bug without failing the test.
12120                  */
12121                 if (pci_dev_present(dma_wait_state_chipsets)) {
12122                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12123                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12124                 } else {
12125                         /* Safe to use the calculated DMA boundary. */
12126                         tp->dma_rwctrl = saved_dma_rwctrl;
12127                 }
12128
12129                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12130         }
12131
12132 out:
12133         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12134 out_nofree:
12135         return ret;
12136 }
12137
12138 static void __devinit tg3_init_link_config(struct tg3 *tp)
12139 {
12140         tp->link_config.advertising =
12141                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12142                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12143                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12144                  ADVERTISED_Autoneg | ADVERTISED_MII);
12145         tp->link_config.speed = SPEED_INVALID;
12146         tp->link_config.duplex = DUPLEX_INVALID;
12147         tp->link_config.autoneg = AUTONEG_ENABLE;
12148         tp->link_config.active_speed = SPEED_INVALID;
12149         tp->link_config.active_duplex = DUPLEX_INVALID;
12150         tp->link_config.phy_is_low_power = 0;
12151         tp->link_config.orig_speed = SPEED_INVALID;
12152         tp->link_config.orig_duplex = DUPLEX_INVALID;
12153         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12154 }
12155
12156 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12157 {
12158         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12159                 tp->bufmgr_config.mbuf_read_dma_low_water =
12160                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12161                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12162                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12163                 tp->bufmgr_config.mbuf_high_water =
12164                         DEFAULT_MB_HIGH_WATER_5705;
12165                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12166                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12167                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12168                         tp->bufmgr_config.mbuf_high_water =
12169                                 DEFAULT_MB_HIGH_WATER_5906;
12170                 }
12171
12172                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12173                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12174                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12175                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12176                 tp->bufmgr_config.mbuf_high_water_jumbo =
12177                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12178         } else {
12179                 tp->bufmgr_config.mbuf_read_dma_low_water =
12180                         DEFAULT_MB_RDMA_LOW_WATER;
12181                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12182                         DEFAULT_MB_MACRX_LOW_WATER;
12183                 tp->bufmgr_config.mbuf_high_water =
12184                         DEFAULT_MB_HIGH_WATER;
12185
12186                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12187                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12188                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12189                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12190                 tp->bufmgr_config.mbuf_high_water_jumbo =
12191                         DEFAULT_MB_HIGH_WATER_JUMBO;
12192         }
12193
12194         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12195         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12196 }
12197
12198 static char * __devinit tg3_phy_string(struct tg3 *tp)
12199 {
12200         switch (tp->phy_id & PHY_ID_MASK) {
12201         case PHY_ID_BCM5400:    return "5400";
12202         case PHY_ID_BCM5401:    return "5401";
12203         case PHY_ID_BCM5411:    return "5411";
12204         case PHY_ID_BCM5701:    return "5701";
12205         case PHY_ID_BCM5703:    return "5703";
12206         case PHY_ID_BCM5704:    return "5704";
12207         case PHY_ID_BCM5705:    return "5705";
12208         case PHY_ID_BCM5750:    return "5750";
12209         case PHY_ID_BCM5752:    return "5752";
12210         case PHY_ID_BCM5714:    return "5714";
12211         case PHY_ID_BCM5780:    return "5780";
12212         case PHY_ID_BCM5755:    return "5755";
12213         case PHY_ID_BCM5787:    return "5787";
12214         case PHY_ID_BCM5784:    return "5784";
12215         case PHY_ID_BCM5756:    return "5722/5756";
12216         case PHY_ID_BCM5906:    return "5906";
12217         case PHY_ID_BCM5761:    return "5761";
12218         case PHY_ID_BCM8002:    return "8002/serdes";
12219         case 0:                 return "serdes";
12220         default:                return "unknown";
12221         }
12222 }
12223
12224 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12225 {
12226         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12227                 strcpy(str, "PCI Express");
12228                 return str;
12229         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12230                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12231
12232                 strcpy(str, "PCIX:");
12233
12234                 if ((clock_ctrl == 7) ||
12235                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12236                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12237                         strcat(str, "133MHz");
12238                 else if (clock_ctrl == 0)
12239                         strcat(str, "33MHz");
12240                 else if (clock_ctrl == 2)
12241                         strcat(str, "50MHz");
12242                 else if (clock_ctrl == 4)
12243                         strcat(str, "66MHz");
12244                 else if (clock_ctrl == 6)
12245                         strcat(str, "100MHz");
12246         } else {
12247                 strcpy(str, "PCI:");
12248                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12249                         strcat(str, "66MHz");
12250                 else
12251                         strcat(str, "33MHz");
12252         }
12253         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12254                 strcat(str, ":32-bit");
12255         else
12256                 strcat(str, ":64-bit");
12257         return str;
12258 }
12259
12260 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12261 {
12262         struct pci_dev *peer;
12263         unsigned int func, devnr = tp->pdev->devfn & ~7;
12264
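        /* A dual-port device exposes its second port as another PCI
         * function in the same slot; scan the other functions for it.
         */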
12265         for (func = 0; func < 8; func++) {
12266                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12267                 if (peer && peer != tp->pdev)
12268                         break;
12269                 pci_dev_put(peer);
12270         }
12271         /* 5704 can be configured in single-port mode, set peer to
12272          * tp->pdev in that case.
12273          */
12274         if (!peer) {
12275                 peer = tp->pdev;
12276                 return peer;
12277         }
12278
12279         /*
12280          * We don't need to keep the refcount elevated; there's no way
12281          * to remove one half of this device without removing the other.
12282          */
12283         pci_dev_put(peer);
12284
12285         return peer;
12286 }
12287
12288 static void __devinit tg3_init_coal(struct tg3 *tp)
12289 {
12290         struct ethtool_coalesce *ec = &tp->coal;
12291
12292         memset(ec, 0, sizeof(*ec));
12293         ec->cmd = ETHTOOL_GCOALESCE;
12294         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12295         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12296         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12297         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12298         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12299         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12300         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12301         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12302         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12303
12304         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12305                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12306                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12307                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12308                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12309                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12310         }
12311
12312         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12313                 ec->rx_coalesce_usecs_irq = 0;
12314                 ec->tx_coalesce_usecs_irq = 0;
12315                 ec->stats_block_coalesce_usecs = 0;
12316         }
12317 }
12318
12319 static int __devinit tg3_init_one(struct pci_dev *pdev,
12320                                   const struct pci_device_id *ent)
12321 {
12322         static int tg3_version_printed = 0;
12323         unsigned long tg3reg_base, tg3reg_len;
12324         struct net_device *dev;
12325         struct tg3 *tp;
12326         int i, err, pm_cap;
12327         char str[40];
12328         u64 dma_mask, persist_dma_mask;
12329
12330         if (tg3_version_printed++ == 0)
12331                 printk(KERN_INFO "%s", version);
12332
12333         err = pci_enable_device(pdev);
12334         if (err) {
12335                 printk(KERN_ERR PFX "Cannot enable PCI device, "
12336                        "aborting.\n");
12337                 return err;
12338         }
12339
12340         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12341                 printk(KERN_ERR PFX "Cannot find proper PCI device "
12342                        "base address, aborting.\n");
12343                 err = -ENODEV;
12344                 goto err_out_disable_pdev;
12345         }
12346
12347         err = pci_request_regions(pdev, DRV_MODULE_NAME);
12348         if (err) {
12349                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12350                        "aborting.\n");
12351                 goto err_out_disable_pdev;
12352         }
12353
12354         pci_set_master(pdev);
12355
12356         /* Find power-management capability. */
12357         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12358         if (pm_cap == 0) {
12359                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12360                        "aborting.\n");
12361                 err = -EIO;
12362                 goto err_out_free_res;
12363         }
12364
12365         tg3reg_base = pci_resource_start(pdev, 0);
12366         tg3reg_len = pci_resource_len(pdev, 0);
12367
12368         dev = alloc_etherdev(sizeof(*tp));
12369         if (!dev) {
12370                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12371                 err = -ENOMEM;
12372                 goto err_out_free_res;
12373         }
12374
12375         SET_NETDEV_DEV(dev, &pdev->dev);
12376
12377 #if TG3_VLAN_TAG_USED
12378         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12379         dev->vlan_rx_register = tg3_vlan_rx_register;
12380 #endif
12381
12382         tp = netdev_priv(dev);
12383         tp->pdev = pdev;
12384         tp->dev = dev;
12385         tp->pm_cap = pm_cap;
12386         tp->mac_mode = TG3_DEF_MAC_MODE;
12387         tp->rx_mode = TG3_DEF_RX_MODE;
12388         tp->tx_mode = TG3_DEF_TX_MODE;
12389         tp->mi_mode = MAC_MI_MODE_BASE;
12390         if (tg3_debug > 0)
12391                 tp->msg_enable = tg3_debug;
12392         else
12393                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12394
12395         /* The word/byte swap controls set here govern register access byte
12396          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
12397          * setting below.
12398          */
12399         tp->misc_host_ctrl =
12400                 MISC_HOST_CTRL_MASK_PCI_INT |
12401                 MISC_HOST_CTRL_WORD_SWAP |
12402                 MISC_HOST_CTRL_INDIR_ACCESS |
12403                 MISC_HOST_CTRL_PCISTATE_RW;
12404
12405         /* The NONFRM (non-frame) byte/word swap controls take effect
12406          * on descriptor entries, anything which isn't packet data.
12407          *
12408          * The StrongARM chips on the board (one for tx, one for rx)
12409          * are running in big-endian mode.
12410          */
12411         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12412                         GRC_MODE_WSWAP_NONFRM_DATA);
12413 #ifdef __BIG_ENDIAN
12414         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12415 #endif
12416         spin_lock_init(&tp->lock);
12417         spin_lock_init(&tp->indirect_lock);
12418         INIT_WORK(&tp->reset_task, tg3_reset_task);
12419
12420         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12421         if (!tp->regs) {
12422                 printk(KERN_ERR PFX "Cannot map device registers, "
12423                        "aborting.\n");
12424                 err = -ENOMEM;
12425                 goto err_out_free_dev;
12426         }
12427
12428         tg3_init_link_config(tp);
12429
12430         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12431         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12432         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12433
12434         dev->open = tg3_open;
12435         dev->stop = tg3_close;
12436         dev->get_stats = tg3_get_stats;
12437         dev->set_multicast_list = tg3_set_rx_mode;
12438         dev->set_mac_address = tg3_set_mac_addr;
12439         dev->do_ioctl = tg3_ioctl;
12440         dev->tx_timeout = tg3_tx_timeout;
12441         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12442         dev->ethtool_ops = &tg3_ethtool_ops;
12443         dev->watchdog_timeo = TG3_TX_TIMEOUT;
12444         dev->change_mtu = tg3_change_mtu;
12445         dev->irq = pdev->irq;
12446 #ifdef CONFIG_NET_POLL_CONTROLLER
12447         dev->poll_controller = tg3_poll_controller;
12448 #endif
12449
12450         err = tg3_get_invariants(tp);
12451         if (err) {
12452                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12453                        "aborting.\n");
12454                 goto err_out_iounmap;
12455         }
12456
12457         /* The EPB bridge inside 5714, 5715, and 5780 and any
12458          * device behind the EPB cannot support DMA addresses > 40-bit.
12459          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12460          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12461          * do DMA address check in tg3_start_xmit().
12462          */
12463         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12464                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12465         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12466                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12467 #ifdef CONFIG_HIGHMEM
12468                 dma_mask = DMA_64BIT_MASK;
12469 #endif
12470         } else
12471                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12472
12473         /* Configure DMA attributes. */
12474         if (dma_mask > DMA_32BIT_MASK) {
12475                 err = pci_set_dma_mask(pdev, dma_mask);
12476                 if (!err) {
12477                         dev->features |= NETIF_F_HIGHDMA;
12478                         err = pci_set_consistent_dma_mask(pdev,
12479                                                           persist_dma_mask);
12480                         if (err < 0) {
12481                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12482                                        "DMA for consistent allocations\n");
12483                                 goto err_out_iounmap;
12484                         }
12485                 }
12486         }
12487         if (err || dma_mask == DMA_32BIT_MASK) {
12488                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12489                 if (err) {
12490                         printk(KERN_ERR PFX "No usable DMA configuration, "
12491                                "aborting.\n");
12492                         goto err_out_iounmap;
12493                 }
12494         }
12495
12496         tg3_init_bufmgr_config(tp);
12497
12498         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12499                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12500         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12502             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12503             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12504             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12505             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12506                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12507         } else {
12508                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12509         }
12510
12511         /* TSO is on by default on chips that support hardware TSO.
12512          * Firmware TSO on older chips gives lower performance, so it
12513          * is off by default, but can be enabled using ethtool.
12514          */
12515         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12516                 dev->features |= NETIF_F_TSO;
12517                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12518                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12519                         dev->features |= NETIF_F_TSO6;
12520                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12521                         dev->features |= NETIF_F_TSO_ECN;
12522         }
12523
12525         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12526             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12527             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12528                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12529                 tp->rx_pending = 63;
12530         }
12531
12532         err = tg3_get_device_address(tp);
12533         if (err) {
12534                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12535                        "aborting.\n");
12536                 goto err_out_iounmap;
12537         }
12538
12539         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12540                 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12541                         printk(KERN_ERR PFX "Cannot find proper PCI device "
12542                                "base address for APE, aborting.\n");
12543                         err = -ENODEV;
12544                         goto err_out_iounmap;
12545                 }
12546
12547                 tg3reg_base = pci_resource_start(pdev, 2);
12548                 tg3reg_len = pci_resource_len(pdev, 2);
12549
12550                 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12551                 if (!tp->aperegs) {
12552                         printk(KERN_ERR PFX "Cannot map APE registers, "
12553                                "aborting.\n");
12554                         err = -ENOMEM;
12555                         goto err_out_iounmap;
12556                 }
12557
12558                 tg3_ape_lock_init(tp);
12559         }
12560
12561         /*
12562          * Reset the chip in case a UNDI or EFI driver did not shut it
12563          * down cleanly; otherwise the DMA self test below will enable
12564          * WDMAC and we'll see (spurious) pending DMA on the PCI bus.
12565          */
12566         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12567             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12568                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12569                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12570         }
12571
12572         err = tg3_test_dma(tp);
12573         if (err) {
12574                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12575                 goto err_out_apeunmap;
12576         }
12577
12578         /* Tigon3 can do IPv4 checksumming only (a few newer chips also
12579          * handle IPv6)... and some chips have buggy checksumming.
12580          */
12581         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12582                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12583                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12584                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12585                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12586                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12587                         dev->features |= NETIF_F_IPV6_CSUM;
12588
12589                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12590         } else
12591                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12592
12593         /* flow control autonegotiation is default behavior */
12594         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12595
12596         tg3_init_coal(tp);
12597
12598         pci_set_drvdata(pdev, dev);
12599
12600         err = register_netdev(dev);
12601         if (err) {
12602                 printk(KERN_ERR PFX "Cannot register net device, "
12603                        "aborting.\n");
12604                 goto err_out_apeunmap;
12605         }
12606
12607         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
12608                dev->name,
12609                tp->board_part_number,
12610                tp->pci_chip_rev_id,
12611                tg3_phy_string(tp),
12612                tg3_bus_string(tp, str),
12613                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12614                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12615                  "10/100/1000Base-T")));
12616
12617         for (i = 0; i < 6; i++)
12618                 printk("%2.2x%c", dev->dev_addr[i],
12619                        i == 5 ? '\n' : ':');
12620
12621         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12622                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12623                dev->name,
12624                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12625                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12626                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12627                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12628                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12629                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12630         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12631                dev->name, tp->dma_rwctrl,
12632                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12633                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12634
12635         return 0;
12636
12637 err_out_apeunmap:
12638         if (tp->aperegs) {
12639                 iounmap(tp->aperegs);
12640                 tp->aperegs = NULL;
12641         }
12642
12643 err_out_iounmap:
12644         if (tp->regs) {
12645                 iounmap(tp->regs);
12646                 tp->regs = NULL;
12647         }
12648
12649 err_out_free_dev:
12650         free_netdev(dev);
12651
12652 err_out_free_res:
12653         pci_release_regions(pdev);
12654
12655 err_out_disable_pdev:
12656         pci_disable_device(pdev);
12657         pci_set_drvdata(pdev, NULL);
12658         return err;
12659 }
12660
12661 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12662 {
12663         struct net_device *dev = pci_get_drvdata(pdev);
12664
12665         if (dev) {
12666                 struct tg3 *tp = netdev_priv(dev);
12667
12668                 flush_scheduled_work();
12669                 unregister_netdev(dev);
12670                 if (tp->aperegs) {
12671                         iounmap(tp->aperegs);
12672                         tp->aperegs = NULL;
12673                 }
12674                 if (tp->regs) {
12675                         iounmap(tp->regs);
12676                         tp->regs = NULL;
12677                 }
12678                 free_netdev(dev);
12679                 pci_release_regions(pdev);
12680                 pci_disable_device(pdev);
12681                 pci_set_drvdata(pdev, NULL);
12682         }
12683 }
12684
12685 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
12686 {
12687         struct net_device *dev = pci_get_drvdata(pdev);
12688         struct tg3 *tp = netdev_priv(dev);
12689         int err;
12690
12691         /* PCI register 4 needs to be saved whether netif_running() or not.
12692          * MSI address and data need to be saved if using MSI and
12693          * netif_running().
12694          */
12695         pci_save_state(pdev);
12696
12697         if (!netif_running(dev))
12698                 return 0;
12699
12700         flush_scheduled_work();
12701         tg3_netif_stop(tp);
12702
12703         del_timer_sync(&tp->timer);
12704
12705         tg3_full_lock(tp, 1);
12706         tg3_disable_ints(tp);
12707         tg3_full_unlock(tp);
12708
12709         netif_device_detach(dev);
12710
12711         tg3_full_lock(tp, 0);
12712         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12713         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
12714         tg3_full_unlock(tp);
12715
12716         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
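        /* If the power state change failed, bring the hardware back up and
         * reattach the interface so the device remains usable.
         */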
12717         if (err) {
12718                 tg3_full_lock(tp, 0);
12719
12720                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12721                 if (tg3_restart_hw(tp, 1))
12722                         goto out;
12723
12724                 tp->timer.expires = jiffies + tp->timer_offset;
12725                 add_timer(&tp->timer);
12726
12727                 netif_device_attach(dev);
12728                 tg3_netif_start(tp);
12729
12730 out:
12731                 tg3_full_unlock(tp);
12732         }
12733
12734         return err;
12735 }
12736
12737 static int tg3_resume(struct pci_dev *pdev)
12738 {
12739         struct net_device *dev = pci_get_drvdata(pdev);
12740         struct tg3 *tp = netdev_priv(dev);
12741         int err;
12742
12743         pci_restore_state(tp->pdev);
12744
12745         if (!netif_running(dev))
12746                 return 0;
12747
12748         err = tg3_set_power_state(tp, PCI_D0);
12749         if (err)
12750                 return err;
12751
12752         netif_device_attach(dev);
12753
12754         tg3_full_lock(tp, 0);
12755
12756         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12757         err = tg3_restart_hw(tp, 1);
12758         if (err)
12759                 goto out;
12760
12761         tp->timer.expires = jiffies + tp->timer_offset;
12762         add_timer(&tp->timer);
12763
12764         tg3_netif_start(tp);
12765
12766 out:
12767         tg3_full_unlock(tp);
12768
12769         return err;
12770 }
12771
12772 static struct pci_driver tg3_driver = {
12773         .name           = DRV_MODULE_NAME,
12774         .id_table       = tg3_pci_tbl,
12775         .probe          = tg3_init_one,
12776         .remove         = __devexit_p(tg3_remove_one),
12777         .suspend        = tg3_suspend,
12778         .resume         = tg3_resume
12779 };
12780
12781 static int __init tg3_init(void)
12782 {
12783         return pci_register_driver(&tg3_driver);
12784 }
12785
12786 static void __exit tg3_cleanup(void)
12787 {
12788         pci_unregister_driver(&tg3_driver);
12789 }
12790
12791 module_init(tg3_init);
12792 module_exit(tg3_cleanup);