/*
 * Source: net-next-2.6.git (bbs.cooldavid.org mirror), blob drivers/net/tg3.c
 * at commit "[TG3]: Add 1000T & 1000X flowctl adv helpers".
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
/* Compile in VLAN tag handling only when 802.1Q support is configured. */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
/* Driver identification strings. */
#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.86"
#define DRV_MODULE_RELDATE      "November 9, 2007"

/* Default hardware mode register values and the default netif_msg mask. */
#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts above. */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer allocation sizes.  NB: both macros expand to an expression
 * that references a local variable named 'tp' at the call site.
 */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6
/* Banner printed at probe time; __devinitdata lets it be discarded
 * along with init code when hotplug support is compiled out.
 */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147 static struct pci_device_id tg3_pci_tbl[] = {
148         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
206         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
213         {}
214 };
215
216 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* Statistic names reported via ETHTOOL_GSTRINGS for ETHTOOL_GSTATS.
 * NOTE(review): the ordering appears to mirror the u64 counter layout of
 * struct tg3_ethtool_stats (TG3_NUM_STATS is derived from its size) --
 * confirm against tg3.h before inserting or reordering entries.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
298
/* Self-test names reported via ETHTOOL_GSTRINGS for ETHTOOL_TEST.
 * NOTE(review): ordering presumably matches the test numbering used by
 * the self-test implementation elsewhere in this file -- verify before
 * reordering.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
309
/* Write a 32-bit value to a chip register in the primary MMIO window. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
/* Write a 32-bit value to a register in the APE register window. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
/* Write a chip register through the PCI config-space indirect window.
 * The REG_BASE_ADDR/REG_DATA pair is a single shared window, so the
 * address+data sequence must be serialized with indirect_lock.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
/* MMIO register write followed by a read-back of the same register to
 * flush the posted PCI write to the device.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
345
/* Read a chip register through the PCI config-space indirect window.
 * Serialized with indirect_lock because the address/data window is
 * shared with all other indirect accessors.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
357
/* Write a mailbox register while the chip is in indirect (config-space)
 * access mode.  The RX return-ring consumer and standard-ring producer
 * mailboxes have dedicated config-space aliases; all other mailboxes go
 * through the shared indirect window at an additional +0x5600 offset,
 * serialized by indirect_lock.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
387
/* Read a mailbox register through the indirect config-space window
 * (mailboxes sit at +0x5600 in that window); serialized by
 * indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
399
400 /* usec_wait specifies the wait time in usec when writing to certain registers
401  * where it is unsafe to read back the register without some delay.
402  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
404  */
/* Register write with flush semantics; see the block comment above for
 * why usec_wait is needed on certain registers.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method: write, optionally delay, then read back
                 * to force the write out to the device.
                 */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
424
/* Write a mailbox register and read it back to flush the posted write.
 * The read-back is skipped when the write-reorder or ICH workarounds
 * are active (presumably the read-back is unsafe or pointless on those
 * configurations -- see the flag definitions).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
432
/* Write a TX mailbox.  On chips with the TXD mailbox hardware bug the
 * value is written twice; on chips that may reorder mailbox writes a
 * read-back flushes the write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
/* 5906 mailbox writes go through the GRC mailbox alias region. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
452
/* Shorthand register/mailbox accessors.  NB: every macro below expands
 * to an expression that uses a variable named 'tp' which must be in
 * scope at the call site.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
463
/* Write one 32-bit word of NIC on-board SRAM at offset 'off'.
 *
 * On 5906, writes into the stats-block range are silently dropped
 * (presumably that region is not accessible on that chip -- see the
 * matching check in tg3_read_mem).  Access goes through the shared
 * memory window, via PCI config space or MMIO depending on
 * SRAM_USE_CONFIG; indirect_lock serializes window users and the
 * window base is always parked back at zero.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read one 32-bit word of NIC on-board SRAM at offset 'off' into *val.
 *
 * On 5906 the stats-block range is not read this way; *val is forced
 * to zero instead.  Otherwise the shared memory window is used exactly
 * as in tg3_write_mem (config space or MMIO), under indirect_lock,
 * with the window base parked back at zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
/* Release all eight APE lock grant registers so the driver starts with
 * no stale locks held from a previous run.
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;

        /* Make sure the driver hasn't any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
                                APE_LOCK_GRANT_DRIVER);
}
525
/* Acquire an APE hardware lock.
 *
 * Returns 0 on success (or when APE support is disabled), -EINVAL for
 * an unknown lock number, and -EBUSY if the lock could not be granted
 * within ~1 ms.  Protocol: write a request to the REQ register, poll
 * the GRANT register for our grant bit, and on timeout revoke the
 * outstanding request by writing the GRANT register.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return 0;

        switch (locknum) {
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return -EINVAL;
        }

        /* Each lock uses a 4-byte stride of REQ/GRANT registers. */
        off = 4 * locknum;

        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Mask chip interrupts: set the PCI interrupt mask bit in misc host
 * control and write 1 to interrupt mailbox 0 (a non-zero mailbox value
 * holds off further interrupts -- see tg3_write_indirect_mbox).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Force an interrupt if status-block work may already be pending.
 * Without tagged status, a pending SD_STATUS_UPDATED means the chip
 * won't re-interrupt on its own, so assert SETINT via GRC local ctrl;
 * otherwise just kick the coalescing engine with HOSTCC_MODE_NOW.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Unmask chip interrupts and re-arm interrupt mailbox 0.
 * irq_sync is cleared before the wmb() so other CPUs see the unsynced
 * state before the hardware can raise an interrupt.  With one-shot MSI
 * the mailbox is written a second time (presumably required to re-arm
 * in that mode -- see TG3_FLG2_1SHOT_MSI handling elsewhere).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
635 /* tg3_restart_ints
636  *  similar to tg3_enable_ints, but it accurately determines whether there
637  *  is new work pending and can return without flushing the PIO write
638  *  which reenables interrupts
639  */
/* Re-arm interrupt mailbox 0 after servicing (see block comment above).
 * mmiowb() orders the mailbox write before any later MMIO from other
 * CPUs that might acquire the device lock.
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the netdev side: refresh trans_start so the watchdog does not
 * fire while we are deliberately stopped, then stop NAPI polling and
 * the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
662
/* Counterpart of tg3_netif_stop: restart the TX queue and NAPI, mark
 * the status block updated so a pending-work interrupt is forced, and
 * re-enable chip interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
674
/* Step the core clock selection in TG3PCI_CLOCK_CTRL down to the base
 * setting.  Skipped entirely on CPMU-equipped and 5780-class chips.
 * The intermediate tw32_wait_f() writes (with a 40 usec settle time
 * each) step through ALTCLK states; the exact sequence is
 * hardware-mandated, so do not reorder these writes.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        /* Preserve only the CLKRUN bits and the low divider field. */
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
/* Maximum polls of MI_COM_BUSY before giving up on an MDIO transaction. */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register 'reg' over the MII management (MDIO) interface.
 *
 * Auto-polling is temporarily disabled while we own the MI interface
 * and restored afterwards.  The transaction is started by writing a
 * framed command to MAC_MI_COM and completes when MI_COM_BUSY clears;
 * on completion the data field of MAC_MI_COM holds the result.
 * Returns 0 on success (*val filled in) or -EBUSY on timeout
 * (*val left as 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a short settle to pick up the data. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
757
/* Write 'val' to PHY register 'reg' over the MII management interface.
 *
 * On 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently
 * skipped (returning success).  Otherwise the flow mirrors
 * tg3_readphy(): disable auto-polling, issue a framed write command
 * through MAC_MI_COM, poll MI_COM_BUSY, restore auto-polling.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
806
/* Enable or disable automatic MDI crossover (auto-MDIX) in the PHY.
 *
 * Only applies to 5705-and-later copper PHYs (no-op on older chips and
 * on any serdes configuration).  The 5906 ePHY uses a shadow-register
 * scheme via MII_TG3_EPHY_TEST; other PHYs use the AUX_CTRL misc
 * shadow register (read-modify-write with the WREN bit set).
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        /* Expose the shadow registers, flip the MDIX bit,
                         * then restore the original test-register value.
                         */
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}
844
845 static void tg3_phy_set_wirespeed(struct tg3 *tp)
846 {
847         u32 val;
848
849         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
850                 return;
851
852         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
853             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
854                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
855                              (val | (1 << 15) | (1 << 4)));
856 }
857
858 static int tg3_bmcr_reset(struct tg3 *tp)
859 {
860         u32 phy_control;
861         int limit, err;
862
863         /* OK, reset it, and poll the BMCR_RESET bit until it
864          * clears or we time out.
865          */
866         phy_control = BMCR_RESET;
867         err = tg3_writephy(tp, MII_BMCR, phy_control);
868         if (err != 0)
869                 return -EBUSY;
870
871         limit = 5000;
872         while (limit--) {
873                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874                 if (err != 0)
875                         return -EBUSY;
876
877                 if ((phy_control & BMCR_RESET) == 0) {
878                         udelay(40);
879                         break;
880                 }
881                 udelay(10);
882         }
883         if (limit <= 0)
884                 return -EBUSY;
885
886         return 0;
887 }
888
889 static int tg3_wait_macro_done(struct tg3 *tp)
890 {
891         int limit = 100;
892
893         while (limit--) {
894                 u32 tmp32;
895
896                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897                         if ((tmp32 & 0x1000) == 0)
898                                 break;
899                 }
900         }
901         if (limit <= 0)
902                 return -EBUSY;
903
904         return 0;
905 }
906
/* Write a known test pattern into each of the four PHY DSP channels and
 * read it back to verify the DSP is healthy.  On timeout or read failure
 * *resetp is set so the caller retries after another PHY reset; on a
 * data mismatch a fixup sequence is written instead.  Returns 0 if all
 * four channels verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        /* Six 16-bit-ish words per channel; odd entries are the expected
         * high nibbles (compared under mask 0x000f below).
         */
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Select this channel's DSP test block. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                /* Kick off the write macro and wait for it to finish.
                 * NOTE(review): register 0x16 appears to be a DSP macro
                 * control register; exact command encodings (0x0202,
                 * 0x0082, 0x0802) are undocumented here.
                 */
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Re-select the channel and start the read-back macro. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Read back low/high word pairs and compare them (masked)
                 * against the pattern we wrote.
                 */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                /* Mismatch: apply DSP fixup, do not retry. */
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
972
973 static int tg3_phy_reset_chanpat(struct tg3 *tp)
974 {
975         int chan;
976
977         for (chan = 0; chan < 4; chan++) {
978                 int i;
979
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
981                              (chan * 0x2000) | 0x0200);
982                 tg3_writephy(tp, 0x16, 0x0002);
983                 for (i = 0; i < 6; i++)
984                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
985                 tg3_writephy(tp, 0x16, 0x0202);
986                 if (tg3_wait_macro_done(tp))
987                         return -EBUSY;
988         }
989
990         return 0;
991 }
992
993 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
994 {
995         u32 reg32, phy9_orig;
996         int retries, do_phy_reset, err;
997
998         retries = 10;
999         do_phy_reset = 1;
1000         do {
1001                 if (do_phy_reset) {
1002                         err = tg3_bmcr_reset(tp);
1003                         if (err)
1004                                 return err;
1005                         do_phy_reset = 0;
1006                 }
1007
1008                 /* Disable transmitter and interrupt.  */
1009                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1010                         continue;
1011
1012                 reg32 |= 0x3000;
1013                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1014
1015                 /* Set full-duplex, 1000 mbps.  */
1016                 tg3_writephy(tp, MII_BMCR,
1017                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1018
1019                 /* Set to master mode.  */
1020                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1021                         continue;
1022
1023                 tg3_writephy(tp, MII_TG3_CTRL,
1024                              (MII_TG3_CTRL_AS_MASTER |
1025                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1026
1027                 /* Enable SM_DSP_CLOCK and 6dB.  */
1028                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1029
1030                 /* Block the PHY control access.  */
1031                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1032                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1033
1034                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1035                 if (!err)
1036                         break;
1037         } while (--retries);
1038
1039         err = tg3_phy_reset_chanpat(tp);
1040         if (err)
1041                 return err;
1042
1043         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1044         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1045
1046         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1047         tg3_writephy(tp, 0x16, 0x0000);
1048
1049         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1050             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1051                 /* Set Extended packet length bit for jumbo frames */
1052                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1053         }
1054         else {
1055                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1056         }
1057
1058         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1059
1060         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1061                 reg32 &= ~0x3000;
1062                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1063         } else if (!err)
1064                 err = -EBUSY;
1065
1066         return err;
1067 }
1068
1069 static void tg3_link_report(struct tg3 *);
1070
1071 /* This will reset the tigon3 PHY if there is no valid
1072  * link unless the FORCE argument is non-zero.
1073  */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        /* 5906: take the EPHY out of IDDQ (low-power) before touching it. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* BMSR is latched; read twice to get the current link state. */
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        /* Resetting the PHY drops the link; report that before we start. */
        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        /* 5703/5704/5705 need the heavyweight DSP-verified reset path. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                u32 val;

                /* Undo the 12.5MHz MAC clock forced by power-down. */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }

                /* Disable GPHY autopowerdown. */
                tg3_writephy(tp, MII_TG3_MISC_SHDW,
                             MII_TG3_MISC_SHDW_WREN |
                             MII_TG3_MISC_SHDW_APD_SEL |
                             MII_TG3_MISC_SHDW_APD_WKTM_84MS);
        }

out:
        /* Chip-specific DSP errata fixups; the magic address/value pairs
         * come from Broadcom and are not otherwise documented here.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                /* Written twice intentionally. */
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
                } else
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
1195
/* Configure the GRC local-control GPIOs that switch the NIC between
 * main and auxiliary (Vaux) power.  On dual-port 5704/5714 boards the
 * two functions share the GPIOs, so the peer device's WOL/ASF state is
 * consulted before changing anything.  The exact GPIO sequencing is
 * board-specific and order-sensitive.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        /* LOM (LAN-on-motherboard) designs have no aux power to manage. */
        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        /* If either function needs WOL or ASF, keep aux power driven. */
        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        /* Let the peer own the GPIOs if it is fully up. */
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        /* Three staged writes: raise GPIO0 after the others
                         * settle, then drop GPIO2 where it is usable.
                         */
                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                /* Nobody needs aux power: pulse GPIO1 to switch it off. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}
1291
1292 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1293 {
1294         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1295                 return 1;
1296         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1297                 if (speed != SPEED_10)
1298                         return 1;
1299         } else if (speed == SPEED_10)
1300                 return 1;
1301
1302         return 0;
1303 }
1304
1305 static int tg3_setup_phy(struct tg3 *, int);
1306
1307 #define RESET_KIND_SHUTDOWN     0
1308 #define RESET_KIND_INIT         1
1309 #define RESET_KIND_SUSPEND      2
1310
1311 static void tg3_write_sig_post_reset(struct tg3 *, int);
1312 static int tg3_halt_cpu(struct tg3 *, u32);
1313 static int tg3_nvram_lock(struct tg3 *);
1314 static void tg3_nvram_unlock(struct tg3 *);
1315
/* Put the PHY (or SerDes block) into its lowest safe power state prior
 * to suspend/shutdown.  Several chip revisions must NOT have the PHY
 * powered down at all due to hardware bugs; those return early after
 * only forcing the LEDs off.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        /* Hold the SerDes autoneg block in reset. */
                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* 5906 EPHY: reset it, then gate it into IDDQ mode. */
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else {
                /* Force LEDs off and select low-power AUX_CTRL settings. */
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
                return;

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                /* Drop the 1000MB MAC clock to 12.5MHz while powered down. */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1363
1364 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1365 {
1366         u32 misc_host_ctrl;
1367         u16 power_control, power_caps;
1368         int pm = tp->pm_cap;
1369
1370         /* Make sure register accesses (indirect or otherwise)
1371          * will function correctly.
1372          */
1373         pci_write_config_dword(tp->pdev,
1374                                TG3PCI_MISC_HOST_CTRL,
1375                                tp->misc_host_ctrl);
1376
1377         pci_read_config_word(tp->pdev,
1378                              pm + PCI_PM_CTRL,
1379                              &power_control);
1380         power_control |= PCI_PM_CTRL_PME_STATUS;
1381         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1382         switch (state) {
1383         case PCI_D0:
1384                 power_control |= 0;
1385                 pci_write_config_word(tp->pdev,
1386                                       pm + PCI_PM_CTRL,
1387                                       power_control);
1388                 udelay(100);    /* Delay after power state change */
1389
1390                 /* Switch out of Vaux if it is a NIC */
1391                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1392                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1393
1394                 return 0;
1395
1396         case PCI_D1:
1397                 power_control |= 1;
1398                 break;
1399
1400         case PCI_D2:
1401                 power_control |= 2;
1402                 break;
1403
1404         case PCI_D3hot:
1405                 power_control |= 3;
1406                 break;
1407
1408         default:
1409                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1410                        "requested.\n",
1411                        tp->dev->name, state);
1412                 return -EINVAL;
1413         };
1414
1415         power_control |= PCI_PM_CTRL_PME_ENABLE;
1416
1417         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1418         tw32(TG3PCI_MISC_HOST_CTRL,
1419              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1420
1421         if (tp->link_config.phy_is_low_power == 0) {
1422                 tp->link_config.phy_is_low_power = 1;
1423                 tp->link_config.orig_speed = tp->link_config.speed;
1424                 tp->link_config.orig_duplex = tp->link_config.duplex;
1425                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1426         }
1427
1428         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1429                 tp->link_config.speed = SPEED_10;
1430                 tp->link_config.duplex = DUPLEX_HALF;
1431                 tp->link_config.autoneg = AUTONEG_ENABLE;
1432                 tg3_setup_phy(tp, 0);
1433         }
1434
1435         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1436                 u32 val;
1437
1438                 val = tr32(GRC_VCPU_EXT_CTRL);
1439                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1440         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1441                 int i;
1442                 u32 val;
1443
1444                 for (i = 0; i < 200; i++) {
1445                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1446                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1447                                 break;
1448                         msleep(1);
1449                 }
1450         }
1451         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1452                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1453                                                      WOL_DRV_STATE_SHUTDOWN |
1454                                                      WOL_DRV_WOL |
1455                                                      WOL_SET_MAGIC_PKT);
1456
1457         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1458
1459         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1460                 u32 mac_mode;
1461
1462                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1463                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1464                         udelay(40);
1465
1466                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1467                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1468                         else
1469                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1470
1471                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1472                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1473                             ASIC_REV_5700) {
1474                                 u32 speed = (tp->tg3_flags &
1475                                              TG3_FLAG_WOL_SPEED_100MB) ?
1476                                              SPEED_100 : SPEED_10;
1477                                 if (tg3_5700_link_polarity(tp, speed))
1478                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1479                                 else
1480                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1481                         }
1482                 } else {
1483                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1484                 }
1485
1486                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1487                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1488
1489                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1490                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1491                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1492
1493                 tw32_f(MAC_MODE, mac_mode);
1494                 udelay(100);
1495
1496                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1497                 udelay(10);
1498         }
1499
1500         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1501             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1502              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1503                 u32 base_val;
1504
1505                 base_val = tp->pci_clock_ctrl;
1506                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1507                              CLOCK_CTRL_TXCLK_DISABLE);
1508
1509                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1510                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1511         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1512                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1513                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1514                 /* do nothing */
1515         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1516                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1517                 u32 newbits1, newbits2;
1518
1519                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1520                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1521                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1522                                     CLOCK_CTRL_TXCLK_DISABLE |
1523                                     CLOCK_CTRL_ALTCLK);
1524                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1525                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1526                         newbits1 = CLOCK_CTRL_625_CORE;
1527                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1528                 } else {
1529                         newbits1 = CLOCK_CTRL_ALTCLK;
1530                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1531                 }
1532
1533                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1534                             40);
1535
1536                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1537                             40);
1538
1539                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1540                         u32 newbits3;
1541
1542                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1543                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1544                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1545                                             CLOCK_CTRL_TXCLK_DISABLE |
1546                                             CLOCK_CTRL_44MHZ_CORE);
1547                         } else {
1548                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1549                         }
1550
1551                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1552                                     tp->pci_clock_ctrl | newbits3, 40);
1553                 }
1554         }
1555
1556         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1557             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1558             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1559                 tg3_power_down_phy(tp);
1560
1561         tg3_frob_aux_power(tp);
1562
1563         /* Workaround for unstable PLL clock */
1564         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1565             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1566                 u32 val = tr32(0x7d00);
1567
1568                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1569                 tw32(0x7d00, val);
1570                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1571                         int err;
1572
1573                         err = tg3_nvram_lock(tp);
1574                         tg3_halt_cpu(tp, RX_CPU_BASE);
1575                         if (!err)
1576                                 tg3_nvram_unlock(tp);
1577                 }
1578         }
1579
1580         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1581
1582         /* Finally, set the new power state. */
1583         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1584         udelay(100);    /* Delay after power state change */
1585
1586         return 0;
1587 }
1588
1589 static void tg3_link_report(struct tg3 *tp)
1590 {
1591         if (!netif_carrier_ok(tp->dev)) {
1592                 if (netif_msg_link(tp))
1593                         printk(KERN_INFO PFX "%s: Link is down.\n",
1594                                tp->dev->name);
1595         } else if (netif_msg_link(tp)) {
1596                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1597                        tp->dev->name,
1598                        (tp->link_config.active_speed == SPEED_1000 ?
1599                         1000 :
1600                         (tp->link_config.active_speed == SPEED_100 ?
1601                          100 : 10)),
1602                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1603                         "full" : "half"));
1604
1605                 printk(KERN_INFO PFX
1606                        "%s: Flow control is %s for TX and %s for RX.\n",
1607                        tp->dev->name,
1608                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1609                        "on" : "off",
1610                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1611                        "on" : "off");
1612         }
1613 }
1614
1615 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1616 {
1617         u16 miireg;
1618
1619         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1620                 miireg = ADVERTISE_PAUSE_CAP;
1621         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1622                 miireg = ADVERTISE_PAUSE_ASYM;
1623         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1624                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1625         else
1626                 miireg = 0;
1627
1628         return miireg;
1629 }
1630
1631 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1632 {
1633         u16 miireg;
1634
1635         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1636                 miireg = ADVERTISE_1000XPAUSE;
1637         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1638                 miireg = ADVERTISE_1000XPSE_ASYM;
1639         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1640                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1641         else
1642                 miireg = 0;
1643
1644         return miireg;
1645 }
1646
1647 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1648 {
1649         u8 cap = 0;
1650
1651         if (lcladv & ADVERTISE_PAUSE_CAP) {
1652                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1653                         if (rmtadv & LPA_PAUSE_CAP)
1654                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1655                         else if (rmtadv & LPA_PAUSE_ASYM)
1656                                 cap = TG3_FLOW_CTRL_RX;
1657                 } else {
1658                         if (rmtadv & LPA_PAUSE_CAP)
1659                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1660                 }
1661         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1662                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1663                         cap = TG3_FLOW_CTRL_TX;
1664         }
1665
1666         return cap;
1667 }
1668
1669 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1670 {
1671         u8 cap = 0;
1672
1673         if (lcladv & ADVERTISE_1000XPAUSE) {
1674                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1675                         if (rmtadv & LPA_1000XPAUSE)
1676                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1677                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1678                                 cap = TG3_FLOW_CTRL_RX;
1679                 } else {
1680                         if (rmtadv & LPA_1000XPAUSE)
1681                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1682                 }
1683         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1684                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1685                         cap = TG3_FLOW_CTRL_TX;
1686         }
1687
1688         return cap;
1689 }
1690
1691 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1692 {
1693         u8 new_tg3_flags = 0;
1694         u32 old_rx_mode = tp->rx_mode;
1695         u32 old_tx_mode = tp->tx_mode;
1696
1697         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1698                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1699                         new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1700                                                                    remote_adv);
1701                 else
1702                         new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1703                                                                    remote_adv);
1704         } else {
1705                 new_tg3_flags = tp->link_config.flowctrl;
1706         }
1707
1708         tp->link_config.active_flowctrl = new_tg3_flags;
1709
1710         if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1711                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1712         else
1713                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1714
1715         if (old_rx_mode != tp->rx_mode) {
1716                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1717         }
1718
1719         if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1720                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1721         else
1722                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1723
1724         if (old_tx_mode != tp->tx_mode) {
1725                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1726         }
1727 }
1728
1729 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1730 {
1731         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1732         case MII_TG3_AUX_STAT_10HALF:
1733                 *speed = SPEED_10;
1734                 *duplex = DUPLEX_HALF;
1735                 break;
1736
1737         case MII_TG3_AUX_STAT_10FULL:
1738                 *speed = SPEED_10;
1739                 *duplex = DUPLEX_FULL;
1740                 break;
1741
1742         case MII_TG3_AUX_STAT_100HALF:
1743                 *speed = SPEED_100;
1744                 *duplex = DUPLEX_HALF;
1745                 break;
1746
1747         case MII_TG3_AUX_STAT_100FULL:
1748                 *speed = SPEED_100;
1749                 *duplex = DUPLEX_FULL;
1750                 break;
1751
1752         case MII_TG3_AUX_STAT_1000HALF:
1753                 *speed = SPEED_1000;
1754                 *duplex = DUPLEX_HALF;
1755                 break;
1756
1757         case MII_TG3_AUX_STAT_1000FULL:
1758                 *speed = SPEED_1000;
1759                 *duplex = DUPLEX_FULL;
1760                 break;
1761
1762         default:
1763                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1764                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1765                                  SPEED_10;
1766                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1767                                   DUPLEX_HALF;
1768                         break;
1769                 }
1770                 *speed = SPEED_INVALID;
1771                 *duplex = DUPLEX_INVALID;
1772                 break;
1773         };
1774 }
1775
/* Program the copper PHY's advertisement and control registers from
 * tp->link_config, then either force the requested speed/duplex
 * (autoneg disabled) or (re)start autonegotiation.
 *
 * Three advertisement cases:
 *  - entering low power: advertise 10Mb only (plus 100Mb for WOL);
 *  - speed == SPEED_INVALID: advertise everything link_config allows;
 *  - specific speed requested: advertise only that single mode.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything
		 * allowed by link_config (minus gigabit on 10/100-only
		 * parts), plus the configured pause capabilities.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* NOTE(review): advertising as master on 5701
			 * A0/B0 is presumably a chip-rev workaround —
			 * confirm against the errata.
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* 10/100 requested: clear the gigabit control
			 * register below.
			 */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		/* Forced mode: build the BMCR value for the requested
		 * speed/duplex and record it as the active settings.
		 */
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback first, wait up to
			 * 15ms for link-down, then write the new mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: kick off (re)negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1913
1914 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1915 {
1916         int err;
1917
1918         /* Turn off tap power management. */
1919         /* Set Extended packet length bit */
1920         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1921
1922         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1923         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1924
1925         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1926         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1927
1928         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1929         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1930
1931         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1932         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1933
1934         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1935         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1936
1937         udelay(40);
1938
1939         return err;
1940 }
1941
1942 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1943 {
1944         u32 adv_reg, all_mask = 0;
1945
1946         if (mask & ADVERTISED_10baseT_Half)
1947                 all_mask |= ADVERTISE_10HALF;
1948         if (mask & ADVERTISED_10baseT_Full)
1949                 all_mask |= ADVERTISE_10FULL;
1950         if (mask & ADVERTISED_100baseT_Half)
1951                 all_mask |= ADVERTISE_100HALF;
1952         if (mask & ADVERTISED_100baseT_Full)
1953                 all_mask |= ADVERTISE_100FULL;
1954
1955         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1956                 return 0;
1957
1958         if ((adv_reg & all_mask) != all_mask)
1959                 return 0;
1960         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1961                 u32 tg3_ctrl;
1962
1963                 all_mask = 0;
1964                 if (mask & ADVERTISED_1000baseT_Half)
1965                         all_mask |= ADVERTISE_1000HALF;
1966                 if (mask & ADVERTISED_1000baseT_Full)
1967                         all_mask |= ADVERTISE_1000FULL;
1968
1969                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1970                         return 0;
1971
1972                 if ((tg3_ctrl & all_mask) != all_mask)
1973                         return 0;
1974         }
1975         return 1;
1976 }
1977
/* Bring up / re-evaluate the link on a copper PHY.
 *
 * Acknowledges stale MAC status, applies per-chip PHY workarounds,
 * polls the PHY for link and negotiated speed/duplex, programs the MAC
 * port mode and flow control accordingly, and reports carrier
 * transitions.  @force_reset forces a PHY reset before evaluation.
 * Returns 0 unless BCM5401 DSP initialization fails.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Acknowledge any latched link/configuration status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched; read twice to get the
		 * current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down on a 5401: (re)load the DSP and
			 * wait up to 10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 still down after a gigabit link:
			 * reset and reload the DSP once more.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* NOTE(review): bit 10 of this aux control page looks
		 * like a capacitive-coupling enable — confirm against
		 * the PHY datasheet.  If it was clear, set it and skip
		 * straight to relink.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link (up to ~4ms). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a nonzero aux status, then decode it into
		 * the negotiated speed and duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a plausible BMCR value (0x7fff indicates a
		 * bad read).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link is only "up" if the PHY is
			 * really in the requested forced configuration.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		/* If we are not advertising what has been requested,
		 * bring the link down and reconfigure.
		 */
		if (local_adv !=
		    tg3_advert_flowctrl_1000T(tp->link_config.flowctrl)) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram the PHY and re-check link state. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode: MII for 10/100, GMII otherwise. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		/* 5700 at gigabit on PCI-X / fast PCI: let the link
		 * settle, re-ack latched status, then post MAGIC2 to
		 * the firmware mailbox.
		 */
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Report carrier transitions to the network stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2252
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
        int state;      /* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;      /* MR_* control and status bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* cur_time is incremented on every state machine invocation;
         * link_time presumably records cur_time at a link transition —
         * confirm against the state machine body.
         */
        unsigned long link_time, cur_time;

        /* Last config word received (from MAC_RX_AUTO_NEG) and how
         * many consecutive times it has repeated.
         */
        u32 ability_match_cfg;
        int ability_match_count;

        /* Match flags derived from the received config stream. */
        char ability_match, idle_match, ack_match;

        u32 txconfig, rxconfig; /* tx/rx config words; ANEG_CFG_* bits */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* NOTE(review): units appear to be ap->cur_time ticks — confirm. */
#define ANEG_STATE_SETTLE_TIME  10000
2316
2317 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2318                                    struct tg3_fiber_aneginfo *ap)
2319 {
2320         unsigned long delta;
2321         u32 rx_cfg_reg;
2322         int ret;
2323
2324         if (ap->state == ANEG_STATE_UNKNOWN) {
2325                 ap->rxconfig = 0;
2326                 ap->link_time = 0;
2327                 ap->cur_time = 0;
2328                 ap->ability_match_cfg = 0;
2329                 ap->ability_match_count = 0;
2330                 ap->ability_match = 0;
2331                 ap->idle_match = 0;
2332                 ap->ack_match = 0;
2333         }
2334         ap->cur_time++;
2335
2336         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2337                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2338
2339                 if (rx_cfg_reg != ap->ability_match_cfg) {
2340                         ap->ability_match_cfg = rx_cfg_reg;
2341                         ap->ability_match = 0;
2342                         ap->ability_match_count = 0;
2343                 } else {
2344                         if (++ap->ability_match_count > 1) {
2345                                 ap->ability_match = 1;
2346                                 ap->ability_match_cfg = rx_cfg_reg;
2347                         }
2348                 }
2349                 if (rx_cfg_reg & ANEG_CFG_ACK)
2350                         ap->ack_match = 1;
2351                 else
2352                         ap->ack_match = 0;
2353
2354                 ap->idle_match = 0;
2355         } else {
2356                 ap->idle_match = 1;
2357                 ap->ability_match_cfg = 0;
2358                 ap->ability_match_count = 0;
2359                 ap->ability_match = 0;
2360                 ap->ack_match = 0;
2361
2362                 rx_cfg_reg = 0;
2363         }
2364
2365         ap->rxconfig = rx_cfg_reg;
2366         ret = ANEG_OK;
2367
2368         switch(ap->state) {
2369         case ANEG_STATE_UNKNOWN:
2370                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2371                         ap->state = ANEG_STATE_AN_ENABLE;
2372
2373                 /* fallthru */
2374         case ANEG_STATE_AN_ENABLE:
2375                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2376                 if (ap->flags & MR_AN_ENABLE) {
2377                         ap->link_time = 0;
2378                         ap->cur_time = 0;
2379                         ap->ability_match_cfg = 0;
2380                         ap->ability_match_count = 0;
2381                         ap->ability_match = 0;
2382                         ap->idle_match = 0;
2383                         ap->ack_match = 0;
2384
2385                         ap->state = ANEG_STATE_RESTART_INIT;
2386                 } else {
2387                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2388                 }
2389                 break;
2390
2391         case ANEG_STATE_RESTART_INIT:
2392                 ap->link_time = ap->cur_time;
2393                 ap->flags &= ~(MR_NP_LOADED);
2394                 ap->txconfig = 0;
2395                 tw32(MAC_TX_AUTO_NEG, 0);
2396                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2397                 tw32_f(MAC_MODE, tp->mac_mode);
2398                 udelay(40);
2399
2400                 ret = ANEG_TIMER_ENAB;
2401                 ap->state = ANEG_STATE_RESTART;
2402
2403                 /* fallthru */
2404         case ANEG_STATE_RESTART:
2405                 delta = ap->cur_time - ap->link_time;
2406                 if (delta > ANEG_STATE_SETTLE_TIME) {
2407                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2408                 } else {
2409                         ret = ANEG_TIMER_ENAB;
2410                 }
2411                 break;
2412
2413         case ANEG_STATE_DISABLE_LINK_OK:
2414                 ret = ANEG_DONE;
2415                 break;
2416
2417         case ANEG_STATE_ABILITY_DETECT_INIT:
2418                 ap->flags &= ~(MR_TOGGLE_TX);
2419                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2420                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2421                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2422                 tw32_f(MAC_MODE, tp->mac_mode);
2423                 udelay(40);
2424
2425                 ap->state = ANEG_STATE_ABILITY_DETECT;
2426                 break;
2427
2428         case ANEG_STATE_ABILITY_DETECT:
2429                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2430                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2431                 }
2432                 break;
2433
2434         case ANEG_STATE_ACK_DETECT_INIT:
2435                 ap->txconfig |= ANEG_CFG_ACK;
2436                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2437                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2438                 tw32_f(MAC_MODE, tp->mac_mode);
2439                 udelay(40);
2440
2441                 ap->state = ANEG_STATE_ACK_DETECT;
2442
2443                 /* fallthru */
2444         case ANEG_STATE_ACK_DETECT:
2445                 if (ap->ack_match != 0) {
2446                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2447                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2448                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2449                         } else {
2450                                 ap->state = ANEG_STATE_AN_ENABLE;
2451                         }
2452                 } else if (ap->ability_match != 0 &&
2453                            ap->rxconfig == 0) {
2454                         ap->state = ANEG_STATE_AN_ENABLE;
2455                 }
2456                 break;
2457
2458         case ANEG_STATE_COMPLETE_ACK_INIT:
2459                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2460                         ret = ANEG_FAILED;
2461                         break;
2462                 }
2463                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2464                                MR_LP_ADV_HALF_DUPLEX |
2465                                MR_LP_ADV_SYM_PAUSE |
2466                                MR_LP_ADV_ASYM_PAUSE |
2467                                MR_LP_ADV_REMOTE_FAULT1 |
2468                                MR_LP_ADV_REMOTE_FAULT2 |
2469                                MR_LP_ADV_NEXT_PAGE |
2470                                MR_TOGGLE_RX |
2471                                MR_NP_RX);
2472                 if (ap->rxconfig & ANEG_CFG_FD)
2473                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2474                 if (ap->rxconfig & ANEG_CFG_HD)
2475                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2476                 if (ap->rxconfig & ANEG_CFG_PS1)
2477                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2478                 if (ap->rxconfig & ANEG_CFG_PS2)
2479                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2480                 if (ap->rxconfig & ANEG_CFG_RF1)
2481                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2482                 if (ap->rxconfig & ANEG_CFG_RF2)
2483                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2484                 if (ap->rxconfig & ANEG_CFG_NP)
2485                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2486
2487                 ap->link_time = ap->cur_time;
2488
2489                 ap->flags ^= (MR_TOGGLE_TX);
2490                 if (ap->rxconfig & 0x0008)
2491                         ap->flags |= MR_TOGGLE_RX;
2492                 if (ap->rxconfig & ANEG_CFG_NP)
2493                         ap->flags |= MR_NP_RX;
2494                 ap->flags |= MR_PAGE_RX;
2495
2496                 ap->state = ANEG_STATE_COMPLETE_ACK;
2497                 ret = ANEG_TIMER_ENAB;
2498                 break;
2499
2500         case ANEG_STATE_COMPLETE_ACK:
2501                 if (ap->ability_match != 0 &&
2502                     ap->rxconfig == 0) {
2503                         ap->state = ANEG_STATE_AN_ENABLE;
2504                         break;
2505                 }
2506                 delta = ap->cur_time - ap->link_time;
2507                 if (delta > ANEG_STATE_SETTLE_TIME) {
2508                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2509                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2510                         } else {
2511                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2512                                     !(ap->flags & MR_NP_RX)) {
2513                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2514                                 } else {
2515                                         ret = ANEG_FAILED;
2516                                 }
2517                         }
2518                 }
2519                 break;
2520
2521         case ANEG_STATE_IDLE_DETECT_INIT:
2522                 ap->link_time = ap->cur_time;
2523                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2524                 tw32_f(MAC_MODE, tp->mac_mode);
2525                 udelay(40);
2526
2527                 ap->state = ANEG_STATE_IDLE_DETECT;
2528                 ret = ANEG_TIMER_ENAB;
2529                 break;
2530
2531         case ANEG_STATE_IDLE_DETECT:
2532                 if (ap->ability_match != 0 &&
2533                     ap->rxconfig == 0) {
2534                         ap->state = ANEG_STATE_AN_ENABLE;
2535                         break;
2536                 }
2537                 delta = ap->cur_time - ap->link_time;
2538                 if (delta > ANEG_STATE_SETTLE_TIME) {
2539                         /* XXX another gem from the Broadcom driver :( */
2540                         ap->state = ANEG_STATE_LINK_OK;
2541                 }
2542                 break;
2543
2544         case ANEG_STATE_LINK_OK:
2545                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2546                 ret = ANEG_DONE;
2547                 break;
2548
2549         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2550                 /* ??? unimplemented */
2551                 break;
2552
2553         case ANEG_STATE_NEXT_PAGE_WAIT:
2554                 /* ??? unimplemented */
2555                 break;
2556
2557         default:
2558                 ret = ANEG_FAILED;
2559                 break;
2560         };
2561
2562         return ret;
2563 }
2564
2565 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2566 {
2567         int res = 0;
2568         struct tg3_fiber_aneginfo aninfo;
2569         int status = ANEG_FAILED;
2570         unsigned int tick;
2571         u32 tmp;
2572
2573         tw32_f(MAC_TX_AUTO_NEG, 0);
2574
2575         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2576         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2577         udelay(40);
2578
2579         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2580         udelay(40);
2581
2582         memset(&aninfo, 0, sizeof(aninfo));
2583         aninfo.flags |= MR_AN_ENABLE;
2584         aninfo.state = ANEG_STATE_UNKNOWN;
2585         aninfo.cur_time = 0;
2586         tick = 0;
2587         while (++tick < 195000) {
2588                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2589                 if (status == ANEG_DONE || status == ANEG_FAILED)
2590                         break;
2591
2592                 udelay(1);
2593         }
2594
2595         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2596         tw32_f(MAC_MODE, tp->mac_mode);
2597         udelay(40);
2598
2599         *flags = aninfo.flags;
2600
2601         if (status == ANEG_DONE &&
2602             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2603                              MR_LP_ADV_FULL_DUPLEX)))
2604                 res = 1;
2605
2606         return res;
2607 }
2608
/* Initialize the BCM8002 SerDes PHY.
 *
 * Performs a software reset followed by a vendor-specified register
 * sequence: PLL lock range, auto-lock/comdet enable, and a power-on
 * reset pulse.  Registers 0x10-0x18 are vendor-specific; the magic
 * values come from Broadcom and must be written in exactly this order.
 * Skipped when init has already completed and the PCS is not synced
 * (i.e. no reset needed because there is no link).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2658
/* Fiber link setup for chips with hardware 1000BASE-X autoneg
 * (the SG_DIG block, e.g. 5704S).
 *
 * @mac_status: MAC_STATUS value sampled by the caller.
 *
 * Either forces the link up (autoneg disabled), programs/monitors the
 * SG_DIG autoneg engine, or falls back to parallel detection when the
 * partner is not sending config code words.  Updates flow control and
 * tp->serdes_counter / TG3_FLG2_PARALLEL_DETECT as side effects.
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* 5704 A0/A1 do not need the MAC_SERDES_CFG workaround writes. */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if the SG_DIG autoneg engine is enabled
		 * (bit 31), turn it off and disable flow control.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Keep a parallel-detected link alive while the autoneg
		 * timeout counter runs down and no config words arrive.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		/* (Re)start the SG_DIG autoneg engine: pulse bit 30. */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* SG_DIG_STATUS bit 1 = autoneg complete; bits 19/20 are
		 * the partner's sym/asym pause advertisement.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg not complete; once the timeout counter
			 * expires, try parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2795
/* Fiber link setup for chips WITHOUT hardware autoneg: run the software
 * 802.3z state machine (fiber_autoneg()), or force a 1000FD link when
 * autoneg is disabled.
 *
 * @mac_status: MAC_STATUS value sampled by the caller.
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate the partner's MR_* pause bits into
			 * MII-style LPA bits for flow control setup.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-change events until they stop arriving
		 * (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but we have sync and the partner is not
		 * sending config words: treat the link as up anyway.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
2852
/* Top-level link setup for TBI (fiber) ports.
 *
 * Puts the MAC in TBI port mode, initializes the BCM8002 PHY when
 * present, then delegates to the hardware- or software-autoneg path.
 * Afterwards it acknowledges latched link-state events, updates the
 * link LEDs, and synchronizes netif carrier state, reporting link
 * changes via tg3_link_report().
 *
 * @force_reset is unused here (kept for signature parity with the
 * copper tg3_setup_copper_phy() path).  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so we can report a change even when
	 * the carrier state itself does not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg chip, carrier already up, and the
	 * MAC reports a clean synced link - nothing to renegotiate.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI (fiber) port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the latched link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack pending sync/config/link events until they stop arriving
	 * (bounded at 100 iterations).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Nudge the partner with a burst of config words when
		 * autoneg is wanted but the timeout has expired.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000 full duplex when up. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate the new link state; report when anything changed. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2960
2961 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2962 {
2963         int current_link_up, err = 0;
2964         u32 bmsr, bmcr;
2965         u16 current_speed;
2966         u8 current_duplex;
2967
2968         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2969         tw32_f(MAC_MODE, tp->mac_mode);
2970         udelay(40);
2971
2972         tw32(MAC_EVENT, 0);
2973
2974         tw32_f(MAC_STATUS,
2975              (MAC_STATUS_SYNC_CHANGED |
2976               MAC_STATUS_CFG_CHANGED |
2977               MAC_STATUS_MI_COMPLETION |
2978               MAC_STATUS_LNKSTATE_CHANGED));
2979         udelay(40);
2980
2981         if (force_reset)
2982                 tg3_phy_reset(tp);
2983
2984         current_link_up = 0;
2985         current_speed = SPEED_INVALID;
2986         current_duplex = DUPLEX_INVALID;
2987
2988         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2989         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2991                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2992                         bmsr |= BMSR_LSTATUS;
2993                 else
2994                         bmsr &= ~BMSR_LSTATUS;
2995         }
2996
2997         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2998
2999         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3000             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3001                 /* do nothing, just check for link up at the end */
3002         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3003                 u32 adv, new_adv;
3004
3005                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3006                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3007                                   ADVERTISE_1000XPAUSE |
3008                                   ADVERTISE_1000XPSE_ASYM |
3009                                   ADVERTISE_SLCT);
3010
3011                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3012
3013                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3014                         new_adv |= ADVERTISE_1000XHALF;
3015                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3016                         new_adv |= ADVERTISE_1000XFULL;
3017
3018                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3019                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3020                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3021                         tg3_writephy(tp, MII_BMCR, bmcr);
3022
3023                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3024                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3025                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3026
3027                         return err;
3028                 }
3029         } else {
3030                 u32 new_bmcr;
3031
3032                 bmcr &= ~BMCR_SPEED1000;
3033                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3034
3035                 if (tp->link_config.duplex == DUPLEX_FULL)
3036                         new_bmcr |= BMCR_FULLDPLX;
3037
3038                 if (new_bmcr != bmcr) {
3039                         /* BMCR_SPEED1000 is a reserved bit that needs
3040                          * to be set on write.
3041                          */
3042                         new_bmcr |= BMCR_SPEED1000;
3043
3044                         /* Force a linkdown */
3045                         if (netif_carrier_ok(tp->dev)) {
3046                                 u32 adv;
3047
3048                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3049                                 adv &= ~(ADVERTISE_1000XFULL |
3050                                          ADVERTISE_1000XHALF |
3051                                          ADVERTISE_SLCT);
3052                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3053                                 tg3_writephy(tp, MII_BMCR, bmcr |
3054                                                            BMCR_ANRESTART |
3055                                                            BMCR_ANENABLE);
3056                                 udelay(10);
3057                                 netif_carrier_off(tp->dev);
3058                         }
3059                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3060                         bmcr = new_bmcr;
3061                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3062                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3063                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3064                             ASIC_REV_5714) {
3065                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3066                                         bmsr |= BMSR_LSTATUS;
3067                                 else
3068                                         bmsr &= ~BMSR_LSTATUS;
3069                         }
3070                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3071                 }
3072         }
3073
3074         if (bmsr & BMSR_LSTATUS) {
3075                 current_speed = SPEED_1000;
3076                 current_link_up = 1;
3077                 if (bmcr & BMCR_FULLDPLX)
3078                         current_duplex = DUPLEX_FULL;
3079                 else
3080                         current_duplex = DUPLEX_HALF;
3081
3082                 if (bmcr & BMCR_ANENABLE) {
3083                         u32 local_adv, remote_adv, common;
3084
3085                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3086                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3087                         common = local_adv & remote_adv;
3088                         if (common & (ADVERTISE_1000XHALF |
3089                                       ADVERTISE_1000XFULL)) {
3090                                 if (common & ADVERTISE_1000XFULL)
3091                                         current_duplex = DUPLEX_FULL;
3092                                 else
3093                                         current_duplex = DUPLEX_HALF;
3094
3095                                 tg3_setup_flow_control(tp, local_adv,
3096                                                        remote_adv);
3097                         }
3098                         else
3099                                 current_link_up = 0;
3100                 }
3101         }
3102
3103         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3104         if (tp->link_config.active_duplex == DUPLEX_HALF)
3105                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3106
3107         tw32_f(MAC_MODE, tp->mac_mode);
3108         udelay(40);
3109
3110         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3111
3112         tp->link_config.active_speed = current_speed;
3113         tp->link_config.active_duplex = current_duplex;
3114
3115         if (current_link_up != netif_carrier_ok(tp->dev)) {
3116                 if (current_link_up)
3117                         netif_carrier_on(tp->dev);
3118                 else {
3119                         netif_carrier_off(tp->dev);
3120                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3121                 }
3122                 tg3_link_report(tp);
3123         }
3124         return err;
3125 }
3126
/* Periodic (poll-timer) link management for MII-serdes parts.
 *
 * If autonegotiation has stalled but a signal is present with no
 * config code words on the wire, force the link up by "parallel
 * detection".  Conversely, if a parallel-detected link starts seeing
 * code words again, re-enable autoneg so flow control etc. can be
 * negotiated properly.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* NOTE(review): register 0x15 is read twice --
			 * presumably the first read clears a latched value
			 * so the second returns current status; confirm
			 * against the PHY datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Drop out of autoneg and force 1000/FD. */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3184
/* Top-level link (re)configuration entry point.
 *
 * Dispatches to the fiber, fiber-MII, or copper PHY setup routine
 * depending on the flavor of PHY attached, then applies the chip-level
 * settings that depend on the resulting link state: clock prescaler
 * (5784 A0/A1), TX inter-packet-gap/slot time, statistics coalescing,
 * and the PCIe power-management L1 threshold (ASPM workaround).
 *
 * @tp:          device state
 * @force_reset: passed through to the PHY-specific setup routine
 *
 * Returns 0 on success or a negative errno from the PHY setup.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		/* Pick a GRC prescaler matching the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* Gigabit half-duplex needs a longer slot time (0xff vs 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Older (pre-5705) chips: only coalesce statistics DMA while the
	 * link is up; zero the tick count when the link is down.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: relax the L1 entry threshold while the link
	 * is down, restore/max it while the link is up.
	 */
	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3247
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Sanity check: we should only get here when the write-reorder
	 * workaround is NOT already active and we are not using the
	 * indirect mailbox write path.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the reset for the workqueue; the actual chip reset happens
	 * later, outside interrupt context.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3268
3269 static inline u32 tg3_tx_avail(struct tg3 *tp)
3270 {
3271         smp_mb();
3272         return (tp->tx_pending -
3273                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3274 }
3275
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reclaim every descriptor the hardware has consumed since our
	 * last pass: unmap the DMA buffers and free the skbs.
	 */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb at the consumer index means the completion
		 * state is inconsistent with what we posted -- suspect
		 * MMIO write reordering and bail out via recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor maps the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Subsequent descriptors map the paged fragments.  Only
		 * the head entry carries the skb pointer; a non-NULL skb
		 * here, or running into hw_idx mid-packet, indicates the
		 * same reordering bug.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue if it was stopped and enough space has freed
	 * up.  The check is repeated under netif_tx_lock to close the
	 * race with tg3_start_xmit() stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3343
3344 /* Returns size of skb allocated or < 0 on error.
3345  *
3346  * We only need to fill in the address because the other members
3347  * of the RX descriptor are invariant, see tg3_init_rings.
3348  *
3349  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3350  * posting buffers we only dirty the first cache line of the RX
3351  * descriptor (containing the address).  Whereas for the RX status
3352  * buffers the cpu only reads the last cacheline of the RX descriptor
3353  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3354  */
3355 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3356                             int src_idx, u32 dest_idx_unmasked)
3357 {
3358         struct tg3_rx_buffer_desc *desc;
3359         struct ring_info *map, *src_map;
3360         struct sk_buff *skb;
3361         dma_addr_t mapping;
3362         int skb_size, dest_idx;
3363
3364         src_map = NULL;
3365         switch (opaque_key) {
3366         case RXD_OPAQUE_RING_STD:
3367                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3368                 desc = &tp->rx_std[dest_idx];
3369                 map = &tp->rx_std_buffers[dest_idx];
3370                 if (src_idx >= 0)
3371                         src_map = &tp->rx_std_buffers[src_idx];
3372                 skb_size = tp->rx_pkt_buf_sz;
3373                 break;
3374
3375         case RXD_OPAQUE_RING_JUMBO:
3376                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3377                 desc = &tp->rx_jumbo[dest_idx];
3378                 map = &tp->rx_jumbo_buffers[dest_idx];
3379                 if (src_idx >= 0)
3380                         src_map = &tp->rx_jumbo_buffers[src_idx];
3381                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3382                 break;
3383
3384         default:
3385                 return -EINVAL;
3386         };
3387
3388         /* Do not overwrite any of the map or rp information
3389          * until we are sure we can commit to a new buffer.
3390          *
3391          * Callers depend upon this behavior and assume that
3392          * we leave everything unchanged if we fail.
3393          */
3394         skb = netdev_alloc_skb(tp->dev, skb_size);
3395         if (skb == NULL)
3396                 return -ENOMEM;
3397
3398         skb_reserve(skb, tp->rx_offset);
3399
3400         mapping = pci_map_single(tp->pdev, skb->data,
3401                                  skb_size - tp->rx_offset,
3402                                  PCI_DMA_FROMDEVICE);
3403
3404         map->skb = skb;
3405         pci_unmap_addr_set(map, mapping, mapping);
3406
3407         if (src_map != NULL)
3408                 src_map->skb = NULL;
3409
3410         desc->addr_hi = ((u64)mapping >> 32);
3411         desc->addr_lo = ((u64)mapping & 0xffffffff);
3412
3413         return skb_size;
3414 }
3415
3416 /* We only need to move over in the address because the other
3417  * members of the RX descriptor are invariant.  See notes above
3418  * tg3_alloc_rx_skb for full details.
3419  */
3420 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3421                            int src_idx, u32 dest_idx_unmasked)
3422 {
3423         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3424         struct ring_info *src_map, *dest_map;
3425         int dest_idx;
3426
3427         switch (opaque_key) {
3428         case RXD_OPAQUE_RING_STD:
3429                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3430                 dest_desc = &tp->rx_std[dest_idx];
3431                 dest_map = &tp->rx_std_buffers[dest_idx];
3432                 src_desc = &tp->rx_std[src_idx];
3433                 src_map = &tp->rx_std_buffers[src_idx];
3434                 break;
3435
3436         case RXD_OPAQUE_RING_JUMBO:
3437                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3438                 dest_desc = &tp->rx_jumbo[dest_idx];
3439                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3440                 src_desc = &tp->rx_jumbo[src_idx];
3441                 src_map = &tp->rx_jumbo_buffers[src_idx];
3442                 break;
3443
3444         default:
3445                 return;
3446         };
3447
3448         dest_map->skb = src_map->skb;
3449         pci_unmap_addr_set(dest_map, mapping,
3450                            pci_unmap_addr(src_map, mapping));
3451         dest_desc->addr_hi = src_desc->addr_hi;
3452         dest_desc->addr_lo = src_desc->addr_lo;
3453
3454         src_map->skb = NULL;
3455 }
3456
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged frame to the stack through the hardware
 * VLAN acceleration path.  Compiled in only when 802.1Q support
 * is configured.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3463
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	/* Walk the status ring entries the chip has produced, bounded
	 * by the NAPI budget.
	 */
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which producer ring and
		 * slot this completion refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unknown ring cookie -- skip without reposting. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Hardware-flagged receive error (except the tolerated
		 * odd-nibble MII case): recycle the buffer and count a
		 * drop; detailed error stats are kept by the card.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		/* Large packet: hand the mapped buffer straight up and
		 * allocate a replacement for the ring.
		 */
		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			/* Small packet: copy into a fresh skb and recycle
			 * the original ring buffer in place.
			 */
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* Reserve 2 bytes so the IP header is aligned. */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the TCP/UDP csum
		 * flag is set and the computed sum is 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* If we have posted a burst of std buffers, tell the chip
		 * now rather than waiting for the end of the poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Ensure the mailbox writes are ordered before the IRQ path
	 * re-enables interrupts.
	 */
	mmiowb();

	return received;
}
3643
/* One pass of the NAPI poll body: handle link-change events, reap TX
 * completions, then process RX within the remaining budget.
 *
 * Returns the updated work_done count.  If TX processing detects the
 * MMIO-reordering condition, returns early with the recovery flag set
 * so tg3_poll() can schedule a chip reset.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit but keep UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3677
/* NAPI poll callback.  Loops calling tg3_poll_work() until either the
 * budget is exhausted or no more work is pending, then completes the
 * poll and re-enables interrupts via tg3_restart_ints().
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		/* TX completion detected MMIO reordering: abort the poll
		 * and kick off the reset task instead.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		/* No more pending work: finish the poll and let the
		 * hardware interrupt us again.
		 */
		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3718
/* Stop the IRQ handler from doing any further work: set irq_sync
 * (checked by the handlers via tg3_irq_sync()), make the store visible,
 * then wait for any handler already running to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	/* Must not already be quiesced. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Publish irq_sync before waiting on in-flight handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3728
/* Nonzero while the driver has quiesced interrupts via
 * tg3_irq_quiesce(); IRQ handlers check this before scheduling NAPI.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3733
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3745
/* Counterpart of tg3_full_lock(): release the device lock.  Note the
 * irq_sync flag is cleared elsewhere, not here.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3750
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the caches for the status block and next RX completion. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling NAPI if the driver has quiesced interrupts. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3767
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the caches for the status block and next RX completion. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Skip scheduling NAPI if the driver has quiesced interrupts. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3792
/* Legacy INTx interrupt handler (non-tagged status).  May share the
 * IRQ line, so it must determine whether the interrupt is ours before
 * claiming it.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (or chip is resetting). */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3841
/* Interrupt handler for chips using tagged status.  The hardware
 * advances sblk->status_tag with every status block update, so
 * comparing it against tp->last_tag tells us whether new work exists
 * without having to read a (slow) chip register in the common case.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not ours (shared IRQ) or chip is resetting. */
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3889
3890 /* ISR for interrupt test */
3891 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3892 {
3893         struct net_device *dev = dev_id;
3894         struct tg3 *tp = netdev_priv(dev);
3895         struct tg3_hw_status *sblk = tp->hw_status;
3896
3897         if ((sblk->status & SD_STATUS_UPDATED) ||
3898             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3899                 tg3_disable_ints(tp);
3900                 return IRQ_RETVAL(1);
3901         }
3902         return IRQ_RETVAL(0);
3903 }
3904
3905 static int tg3_init_hw(struct tg3 *, int);
3906 static int tg3_halt(struct tg3 *, int, int);
3907
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device is shut down and administratively closed;
 * note the error path must drop and re-acquire the full lock because
 * del_timer_sync()/dev_close() may sleep.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		/* Caller expects the lock held on return. */
		tg3_full_lock(tp, 0);
	}
	return err;
}
3929
3930 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point (netconsole/kgdboe): service the device as if
 * its interrupt had fired, since real IRQs may be disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
3937 #endif
3938
/* Process-context recovery worker (scheduled from tg3_tx_timeout and
 * friends): halts the chip and re-initializes it.  Runs under the
 * full lock except around tg3_netif_stop(), which may sleep.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		/* Device was closed before the work ran; nothing to do. */
		tg3_full_unlock(tp);
		return;
	}

	/* Drop the lock: tg3_netif_stop() disables NAPI and can sleep. */
	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Latch and clear the restart-timer request atomically under
	 * the lock so a concurrent requester is not lost.
	 */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* Fall back to flushed mailbox writes after a TX hang. */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3979
/* Dump a terse snapshot of the MAC and DMA engine status registers;
 * used when diagnosing a transmit timeout.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3987
/* net_device watchdog callback: the stack decided transmits have
 * stalled.  Optionally log register state, then defer the actual
 * chip reset to process context via the reset workqueue.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	/* Recovery itself runs in tg3_reset_task(). */
	schedule_work(&tp->reset_task);
}
4000
4001 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4002 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4003 {
4004         u32 base = (u32) mapping & 0xffffffff;
4005
4006         return ((base > 0xffffdcc0) &&
4007                 (base + len + 8 < base));
4008 }
4009
4010 /* Test for DMA addresses > 40-bit */
4011 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4012                                           int len)
4013 {
4014 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4015         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4016                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4017         return 0;
4018 #else
4019         return 0;
4020 #endif
4021 }
4022
4023 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4024
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize the skb into a freshly-allocated copy, re-queue it as a
 * single descriptor at *start, and unwind the DMA mappings of the
 * original descriptors between *start and last_plus_one.  Returns 0
 * on success, -1 if the packet had to be dropped (allocation failure
 * or the new buffer itself crosses a 4GB boundary); in either case
 * the original skb is consumed and the ring entries are cleaned.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Entry 0 held the linear head; the rest held page frags. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First slot now owns the linearized copy. */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
4082
4083 static void tg3_set_txd(struct tg3 *tp, int entry,
4084                         dma_addr_t mapping, int len, u32 flags,
4085                         u32 mss_and_is_end)
4086 {
4087         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4088         int is_end = (mss_and_is_end & 0x1);
4089         u32 mss = (mss_and_is_end >> 1);
4090         u32 vlan_tag = 0;
4091
4092         if (is_end)
4093                 flags |= TXD_FLAG_END;
4094         if (flags & TXD_FLAG_VLAN) {
4095                 vlan_tag = flags >> 16;
4096                 flags &= 0xffff;
4097         }
4098         vlan_tag |= (mss << TXD_MSS_SHIFT);
4099
4100         txd->addr_hi = ((u64) mapping >> 32);
4101         txd->addr_lo = ((u64) mapping & 0xffffffff);
4102         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4103         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4104 }
4105
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path: headers must be writable so the checksum
		 * fields can be primed for the hardware below.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			/* Header length is encoded in the upper bits of
			 * the MSS field for the hardware TSO engine.
			 */
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race against the
		 * completion path freeing descriptors concurrently.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4224
4225 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4226
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments the skb in software and pushes each resulting MTU-sized
 * packet through tg3_start_xmit_dma_bug().  The original skb is
 * always consumed unless NETDEV_TX_BUSY is returned.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		/* Re-check after stopping; reclaim may have just freed
		 * enough descriptors, in which case wake and continue.
		 */
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment with TSO masked out of the feature set. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4259
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* TSO path: headers must be writable to patch checksum
		 * fields below.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers longer than 80 bytes trip a chip TSO bug on
		 * some parts; fall back to software GSO there.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO wants the TCP pseudo-header
			 * checksum pre-loaded.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode IP/TCP option lengths where this chip variant
		 * expects them (MSS upper bits vs. base_flags).
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet and
		 * redo it through the linearizing workaround.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race against the
		 * completion path freeing descriptors concurrently.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4433
4434 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4435                                int new_mtu)
4436 {
4437         dev->mtu = new_mtu;
4438
4439         if (new_mtu > ETH_DATA_LEN) {
4440                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4441                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4442                         ethtool_op_set_tso(dev, 0);
4443                 }
4444                 else
4445                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4446         } else {
4447                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4448                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4449                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4450         }
4451 }
4452
/* net_device change_mtu hook.  Validates the requested MTU and, if the
 * interface is running, performs a full halt/re-init so the rx ring
 * geometry matches the new frame size.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4486
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard rx ring: unmap and free every posted buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo rx ring: same, with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Tx ring: only the first slot of each packet holds the skb;
	 * the following nr_frags slots hold the page-fragment mappings.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4558
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even a single rx buffer
 * could be allocated; partial allocation just shrinks rx_pending.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use the standard ring for jumbo frames too,
	 * so its buffers must be jumbo-sized when the MTU is large.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4648
4649 /*
4650  * Must not be invoked with interrupt sources disabled and
4651  * the hardware shutdown down.
4652  */
4653 static void tg3_free_consistent(struct tg3 *tp)
4654 {
4655         kfree(tp->rx_std_buffers);
4656         tp->rx_std_buffers = NULL;
4657         if (tp->rx_std) {
4658                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4659                                     tp->rx_std, tp->rx_std_mapping);
4660                 tp->rx_std = NULL;
4661         }
4662         if (tp->rx_jumbo) {
4663                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4664                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4665                 tp->rx_jumbo = NULL;
4666         }
4667         if (tp->rx_rcb) {
4668                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4669                                     tp->rx_rcb, tp->rx_rcb_mapping);
4670                 tp->rx_rcb = NULL;
4671         }
4672         if (tp->tx_ring) {
4673                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4674                         tp->tx_ring, tp->tx_desc_mapping);
4675                 tp->tx_ring = NULL;
4676         }
4677         if (tp->hw_status) {
4678                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4679                                     tp->hw_status, tp->status_mapping);
4680                 tp->hw_status = NULL;
4681         }
4682         if (tp->hw_stats) {
4683                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4684                                     tp->hw_stats, tp->stats_mapping);
4685                 tp->hw_stats = NULL;
4686         }
4687 }
4688
4689 /*
4690  * Must not be invoked with interrupt sources disabled and
4691  * the hardware shutdown down.  Can sleep.
4692  */
4693 static int tg3_alloc_consistent(struct tg3 *tp)
4694 {
4695         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4696                                       (TG3_RX_RING_SIZE +
4697                                        TG3_RX_JUMBO_RING_SIZE)) +
4698                                      (sizeof(struct tx_ring_info) *
4699                                       TG3_TX_RING_SIZE),
4700                                      GFP_KERNEL);
4701         if (!tp->rx_std_buffers)
4702                 return -ENOMEM;
4703
4704         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4705         tp->tx_buffers = (struct tx_ring_info *)
4706                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4707
4708         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4709                                           &tp->rx_std_mapping);
4710         if (!tp->rx_std)
4711                 goto err_out;
4712
4713         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4714                                             &tp->rx_jumbo_mapping);
4715
4716         if (!tp->rx_jumbo)
4717                 goto err_out;
4718
4719         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4720                                           &tp->rx_rcb_mapping);
4721         if (!tp->rx_rcb)
4722                 goto err_out;
4723
4724         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4725                                            &tp->tx_desc_mapping);
4726         if (!tp->tx_ring)
4727                 goto err_out;
4728
4729         tp->hw_status = pci_alloc_consistent(tp->pdev,
4730                                              TG3_HW_STATUS_SIZE,
4731                                              &tp->status_mapping);
4732         if (!tp->hw_status)
4733                 goto err_out;
4734
4735         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4736                                             sizeof(struct tg3_hw_stats),
4737                                             &tp->stats_mapping);
4738         if (!tp->hw_stats)
4739                 goto err_out;
4740
4741         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4742         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4743
4744         return 0;
4745
4746 err_out:
4747         tg3_free_consistent(tp);
4748         return -ENOMEM;
4749 }
4750
4751 #define MAX_WAIT_CNT 1000
4752
4753 /* To stop a block, clear the enable bit and poll till it
4754  * clears.  tp->lock is held.
4755  */
4756 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4757 {
4758         unsigned int i;
4759         u32 val;
4760
4761         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4762                 switch (ofs) {
4763                 case RCVLSC_MODE:
4764                 case DMAC_MODE:
4765                 case MBFREE_MODE:
4766                 case BUFMGR_MODE:
4767                 case MEMARB_MODE:
4768                         /* We can't enable/disable these bits of the
4769                          * 5705/5750, just say success.
4770                          */
4771                         return 0;
4772
4773                 default:
4774                         break;
4775                 };
4776         }
4777
4778         val = tr32(ofs);
4779         val &= ~enable_bit;
4780         tw32_f(ofs, val);
4781
4782         for (i = 0; i < MAX_WAIT_CNT; i++) {
4783                 udelay(100);
4784                 val = tr32(ofs);
4785                 if ((val & enable_bit) == 0)
4786                         break;
4787         }
4788
4789         if (i == MAX_WAIT_CNT && !silent) {
4790                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4791                        "ofs=%lx enable_bit=%x\n",
4792                        ofs, enable_bit);
4793                 return -ENODEV;
4794         }
4795
4796         return 0;
4797 }
4798
/* Quiesce the chip: stop the receive path, then the transmit path,
 * then the host-coherency and memory blocks, in that strict order.
 * The order mirrors the data flow so no block is fed by one that is
 * still running.  tp->lock is held.
 *
 * Returns 0 if every block stopped cleanly, otherwise a negative
 * value (an OR of -ENODEV results; callers only test for non-zero).
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop the MAC receive engine first so nothing new enters. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Receive-path blocks, upstream to downstream. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Transmit-path blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        /* Disable the MAC transmitter. */
        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        /* Poll up to MAX_WAIT_CNT * 100us for TX_MODE_ENABLE to clear. */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        /* Host coalescing, write DMA and buffer-management blocks last. */
        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse-reset all FTQ queues. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Clear the shared status/statistics blocks if allocated. */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
4861
4862 /* tp->lock is held. */
4863 static int tg3_nvram_lock(struct tg3 *tp)
4864 {
4865         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4866                 int i;
4867
4868                 if (tp->nvram_lock_cnt == 0) {
4869                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4870                         for (i = 0; i < 8000; i++) {
4871                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4872                                         break;
4873                                 udelay(20);
4874                         }
4875                         if (i == 8000) {
4876                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4877                                 return -ENODEV;
4878                         }
4879                 }
4880                 tp->nvram_lock_cnt++;
4881         }
4882         return 0;
4883 }
4884
4885 /* tp->lock is held. */
4886 static void tg3_nvram_unlock(struct tg3 *tp)
4887 {
4888         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4889                 if (tp->nvram_lock_cnt > 0)
4890                         tp->nvram_lock_cnt--;
4891                 if (tp->nvram_lock_cnt == 0)
4892                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4893         }
4894 }
4895
4896 /* tp->lock is held. */
4897 static void tg3_enable_nvram_access(struct tg3 *tp)
4898 {
4899         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4900             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4901                 u32 nvaccess = tr32(NVRAM_ACCESS);
4902
4903                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4904         }
4905 }
4906
4907 /* tp->lock is held. */
4908 static void tg3_disable_nvram_access(struct tg3 *tp)
4909 {
4910         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4911             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4912                 u32 nvaccess = tr32(NVRAM_ACCESS);
4913
4914                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4915         }
4916 }
4917
/* Post an event to the APE (management) firmware and ring its doorbell.
 * Silently returns if the APE firmware segment is absent or not ready,
 * or if the previous event is still pending after ~1ms.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* Bail out unless the APE shared-memory segment is present... */
        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        /* ...and its firmware reports ready. */
        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (apedata != APE_FW_STATUS_READY)
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                /* Post our event only once the previous one has been
                 * consumed (EVENT_PENDING clear), under the APE lock.
                 */
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        /* apedata holds the status sampled in the final loop iteration;
         * ring the doorbell only if our event was actually posted above.
         */
        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4953
4954 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4955 {
4956         u32 event;
4957         u32 apedata;
4958
4959         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4960                 return;
4961
4962         switch (kind) {
4963                 case RESET_KIND_INIT:
4964                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4965                                         APE_HOST_SEG_SIG_MAGIC);
4966                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4967                                         APE_HOST_SEG_LEN_MAGIC);
4968                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4969                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4970                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4971                                         APE_HOST_DRIVER_ID_MAGIC);
4972                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4973                                         APE_HOST_BEHAV_NO_PHYLOCK);
4974
4975                         event = APE_EVENT_STATUS_STATE_START;
4976                         break;
4977                 case RESET_KIND_SHUTDOWN:
4978                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4979                         break;
4980                 case RESET_KIND_SUSPEND:
4981                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4982                         break;
4983                 default:
4984                         return;
4985         }
4986
4987         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4988
4989         tg3_ape_send_event(tp, event);
4990 }
4991
4992 /* tp->lock is held. */
4993 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4994 {
4995         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4996                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4997
4998         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4999                 switch (kind) {
5000                 case RESET_KIND_INIT:
5001                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5002                                       DRV_STATE_START);
5003                         break;
5004
5005                 case RESET_KIND_SHUTDOWN:
5006                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5007                                       DRV_STATE_UNLOAD);
5008                         break;
5009
5010                 case RESET_KIND_SUSPEND:
5011                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5012                                       DRV_STATE_SUSPEND);
5013                         break;
5014
5015                 default:
5016                         break;
5017                 };
5018         }
5019
5020         if (kind == RESET_KIND_INIT ||
5021             kind == RESET_KIND_SUSPEND)
5022                 tg3_ape_driver_state_change(tp, kind);
5023 }
5024
5025 /* tp->lock is held. */
5026 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5027 {
5028         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5029                 switch (kind) {
5030                 case RESET_KIND_INIT:
5031                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5032                                       DRV_STATE_START_DONE);
5033                         break;
5034
5035                 case RESET_KIND_SHUTDOWN:
5036                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5037                                       DRV_STATE_UNLOAD_DONE);
5038                         break;
5039
5040                 default:
5041                         break;
5042                 };
5043         }
5044
5045         if (kind == RESET_KIND_SHUTDOWN)
5046                 tg3_ape_driver_state_change(tp, kind);
5047 }
5048
5049 /* tp->lock is held. */
5050 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5051 {
5052         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5053                 switch (kind) {
5054                 case RESET_KIND_INIT:
5055                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5056                                       DRV_STATE_START);
5057                         break;
5058
5059                 case RESET_KIND_SHUTDOWN:
5060                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5061                                       DRV_STATE_UNLOAD);
5062                         break;
5063
5064                 case RESET_KIND_SUSPEND:
5065                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5066                                       DRV_STATE_SUSPEND);
5067                         break;
5068
5069                 default:
5070                         break;
5071                 };
5072         }
5073 }
5074
5075 static int tg3_poll_fw(struct tg3 *tp)
5076 {
5077         int i;
5078         u32 val;
5079
5080         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5081                 /* Wait up to 20ms for init done. */
5082                 for (i = 0; i < 200; i++) {
5083                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5084                                 return 0;
5085                         udelay(100);
5086                 }
5087                 return -ENODEV;
5088         }
5089
5090         /* Wait for firmware initialization to complete. */
5091         for (i = 0; i < 100000; i++) {
5092                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5093                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5094                         break;
5095                 udelay(10);
5096         }
5097
5098         /* Chip might not be fitted with firmware.  Some Sun onboard
5099          * parts are configured like that.  So don't signal the timeout
5100          * of the above loop as an error, but do report the lack of
5101          * running firmware once.
5102          */
5103         if (i >= 100000 &&
5104             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5105                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5106
5107                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5108                        tp->dev->name);
5109         }
5110
5111         return 0;
5112 }
5113
/* Save PCI command register before chip reset; the core-clock reset
 * clears the memory-enable bit in it, and tg3_restore_pci_state()
 * writes this saved copy back afterwards.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5119
/* Restore PCI state after chip reset: indirect-access enable, PCI
 * state register, the saved command word, bus-specific parameters
 * (PCIe read-request size or PCI cache-line/latency), PCI-X relaxed
 * ordering, and (on 5780-class parts) the MSI enable bits.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Write back the command word saved by tg3_save_pci_state(). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
                pcie_set_readrq(tp->pdev, 4096);
        else {
                /* Conventional PCI: cache-line size and latency timer
                 * are lost across the reset; restore the saved values.
                 */
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tp->pcix_cap) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
5181
5182 static void tg3_stop_fw(struct tg3 *);
5183
/* Perform a full core-clock reset of the chip and bring it back to a
 * usable baseline: save PCI state, assert GRC_MISC_CFG_CORECLK_RESET,
 * restore PCI state, re-enable the memory arbiter, restore GRC mode
 * and MAC port mode, wait for firmware, and reprobe the ASF state.
 * The statement order below is hardware-mandated; do not reorder.
 * tp->lock is held.  Returns 0 or a negative errno from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int err;

        tg3_nvram_lock(tp);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
        if (tp->hw_status) {
                tp->hw_status->status = 0;
                tp->hw_status->status_tag = 0;
        }
        tp->last_tag = 0;
        smp_mb();
        synchronize_irq(tp->pdev->irq);

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* NOTE(review): 0x7e2c is an undocumented PCIe register;
                 * value/meaning taken from vendor code — do not change.
                 */
                if (tr32(0x7e2c) == 0x60) {
                        tw32(0x7e2c, 0x20);
                }
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* 5906: flag a driver reset to the VCPU and unhalt it. */
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        /* NOTE(review): 0xc4 is a chip-specific PCIe
                         * config register; bit 15 per vendor code.
                         */
                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }
                /* Set PCIE max payload size and clear error status.  */
                pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
        }

        tg3_restore_pci_state(tp);

        /* MMIO is usable again; let the irq handler back in. */
        tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

        val = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Restore the MAC port mode according to the PHY attachment. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else
                tw32_f(MAC_MODE, 0);
        udelay(40);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                /* NOTE(review): 0x7c00 bit 25 — undocumented PCIe
                 * register, taken from vendor code.
                 */
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        /* Reprobe ASF enable state.  */
        tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
        tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
        }

        return 0;
}
5372
5373 /* tp->lock is held. */
5374 static void tg3_stop_fw(struct tg3 *tp)
5375 {
5376         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5377            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5378                 u32 val;
5379                 int i;
5380
5381                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5382                 val = tr32(GRC_RX_CPU_EVENT);
5383                 val |= (1 << 14);
5384                 tw32(GRC_RX_CPU_EVENT, val);
5385
5386                 /* Wait for RX cpu to ACK the event.  */
5387                 for (i = 0; i < 100; i++) {
5388                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5389                                 break;
5390                         udelay(1);
5391                 }
5392         }
5393 }
5394
/* Fully halt the device: pause firmware, signal the reset, quiesce the
 * hardware, reset the chip, then signal completion.  tp->lock is held.
 *
 * @kind:   RESET_KIND_* transition being performed
 * @silent: passed through to tg3_abort_hw() to suppress timeouts
 *
 * Returns the result of tg3_chip_reset() (0 on success).
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int err;

        tg3_stop_fw(tp);

        tg3_write_sig_pre_reset(tp, kind);

        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);

        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        /* fix: collapsed "if (err) return err; return 0;" — identical
         * behavior, one exit point.
         */
        return err;
}
5415
/* Memory layout of the embedded boot firmware image loaded into the
 * chip's internal CPU (text/rodata/data/sbss/bss addresses and sizes).
 * NOTE(review): TG3_FW_RELASE_MINOR is misspelled ("RELASE"); left
 * unchanged because references elsewhere in the file may use the
 * misspelled name.
 */
#define TG3_FW_RELEASE_MAJOR    0x0
#define TG3_FW_RELASE_MINOR     0x0
#define TG3_FW_RELEASE_FIX      0x0
#define TG3_FW_START_ADDR       0x08000000
#define TG3_FW_TEXT_ADDR        0x08000000
#define TG3_FW_TEXT_LEN         0x9c0
#define TG3_FW_RODATA_ADDR      0x080009c0
#define TG3_FW_RODATA_LEN       0x60
#define TG3_FW_DATA_ADDR        0x08000a40
#define TG3_FW_DATA_LEN         0x20
#define TG3_FW_SBSS_ADDR        0x08000a60
#define TG3_FW_SBSS_LEN         0xc
#define TG3_FW_BSS_ADDR         0x08000a70
#define TG3_FW_BSS_LEN          0x10
5430
/* Firmware .text segment for the 5701 A0 workaround (loaded by
 * tg3_load_5701_a0_firmware_fix()).  These are raw MIPS instruction
 * words derived from Broadcom's unpublished sources (see the license
 * note at the top of this file) -- do not edit by hand.  The "+ 1"
 * gives one word of slack beyond TG3_FW_TEXT_LEN/4; presumably guards
 * against a partial trailing word -- the copy loop itself only reads
 * text_len/4 entries.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	/* Jump/dispatch table: each pair is a branch word plus an
	 * argument-setup word (0x0a0001e3 entries all target the same
	 * handler).
	 */
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5524
/* Firmware .rodata segment for the 5701 A0 fix.  The words are ASCII
 * string fragments used by the firmware (e.g. 0x35373031 = "5701",
 * and fragments spelling event/error tags such as "SwEvent0",
 * "fatalErr", "MainCpuB").  Loaded alongside tg3FwText by
 * tg3_load_5701_a0_firmware_fix().
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5532
/* The .data segment is all zeros, so it is compiled out: the loader
 * zero-fills a segment whose data pointer is NULL (see the copy loops
 * in tg3_load_firmware_cpu(), and info.data_data = NULL in
 * tg3_load_5701_a0_firmware_fix()).
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5539
5540 #define RX_CPU_SCRATCH_BASE     0x30000
5541 #define RX_CPU_SCRATCH_SIZE     0x04000
5542 #define TX_CPU_SCRATCH_BASE     0x34000
5543 #define TX_CPU_SCRATCH_SIZE     0x04000
5544
5545 /* tp->lock is held. */
5546 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5547 {
5548         int i;
5549
5550         BUG_ON(offset == TX_CPU_BASE &&
5551             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5552
5553         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5554                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5555
5556                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5557                 return 0;
5558         }
5559         if (offset == RX_CPU_BASE) {
5560                 for (i = 0; i < 10000; i++) {
5561                         tw32(offset + CPU_STATE, 0xffffffff);
5562                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5563                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5564                                 break;
5565                 }
5566
5567                 tw32(offset + CPU_STATE, 0xffffffff);
5568                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5569                 udelay(10);
5570         } else {
5571                 for (i = 0; i < 10000; i++) {
5572                         tw32(offset + CPU_STATE, 0xffffffff);
5573                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5574                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5575                                 break;
5576                 }
5577         }
5578
5579         if (i >= 10000) {
5580                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5581                        "and %s CPU\n",
5582                        tp->dev->name,
5583                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5584                 return -ENODEV;
5585         }
5586
5587         /* Clear firmware's nvram arbitration. */
5588         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5589                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5590         return 0;
5591 }
5592
/* Describes one firmware image for tg3_load_firmware_cpu().  Each of
 * the three segments (.text, .rodata, .data) carries its linked load
 * address, its length in bytes, and a pointer to the payload words.
 * A NULL data pointer means the segment is zero-filled by the loader.
 */
struct fw_info {
	unsigned int text_base;		/* linked address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* linked address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* linked address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5604
5605 /* tp->lock is held. */
5606 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5607                                  int cpu_scratch_size, struct fw_info *info)
5608 {
5609         int err, lock_err, i;
5610         void (*write_op)(struct tg3 *, u32, u32);
5611
5612         if (cpu_base == TX_CPU_BASE &&
5613             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5614                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5615                        "TX cpu firmware on %s which is 5705.\n",
5616                        tp->dev->name);
5617                 return -EINVAL;
5618         }
5619
5620         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5621                 write_op = tg3_write_mem;
5622         else
5623                 write_op = tg3_write_indirect_reg32;
5624
5625         /* It is possible that bootcode is still loading at this point.
5626          * Get the nvram lock first before halting the cpu.
5627          */
5628         lock_err = tg3_nvram_lock(tp);
5629         err = tg3_halt_cpu(tp, cpu_base);
5630         if (!lock_err)
5631                 tg3_nvram_unlock(tp);
5632         if (err)
5633                 goto out;
5634
5635         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5636                 write_op(tp, cpu_scratch_base + i, 0);
5637         tw32(cpu_base + CPU_STATE, 0xffffffff);
5638         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5639         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5640                 write_op(tp, (cpu_scratch_base +
5641                               (info->text_base & 0xffff) +
5642                               (i * sizeof(u32))),
5643                          (info->text_data ?
5644                           info->text_data[i] : 0));
5645         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5646                 write_op(tp, (cpu_scratch_base +
5647                               (info->rodata_base & 0xffff) +
5648                               (i * sizeof(u32))),
5649                          (info->rodata_data ?
5650                           info->rodata_data[i] : 0));
5651         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5652                 write_op(tp, (cpu_scratch_base +
5653                               (info->data_base & 0xffff) +
5654                               (i * sizeof(u32))),
5655                          (info->data_data ?
5656                           info->data_data[i] : 0));
5657
5658         err = 0;
5659
5660 out:
5661         return err;
5662 }
5663
5664 /* tp->lock is held. */
5665 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5666 {
5667         struct fw_info info;
5668         int err, i;
5669
5670         info.text_base = TG3_FW_TEXT_ADDR;
5671         info.text_len = TG3_FW_TEXT_LEN;
5672         info.text_data = &tg3FwText[0];
5673         info.rodata_base = TG3_FW_RODATA_ADDR;
5674         info.rodata_len = TG3_FW_RODATA_LEN;
5675         info.rodata_data = &tg3FwRodata[0];
5676         info.data_base = TG3_FW_DATA_ADDR;
5677         info.data_len = TG3_FW_DATA_LEN;
5678         info.data_data = NULL;
5679
5680         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5681                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5682                                     &info);
5683         if (err)
5684                 return err;
5685
5686         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5687                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5688                                     &info);
5689         if (err)
5690                 return err;
5691
5692         /* Now startup only the RX cpu. */
5693         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5694         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5695
5696         for (i = 0; i < 5; i++) {
5697                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5698                         break;
5699                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5700                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5701                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5702                 udelay(1000);
5703         }
5704         if (i >= 5) {
5705                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5706                        "to set RX CPU PC, is %08x should be %08x\n",
5707                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5708                        TG3_FW_TEXT_ADDR);
5709                 return -ENODEV;
5710         }
5711         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5712         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5713
5714         return 0;
5715 }
5716
5717
5718 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5719 #define TG3_TSO_FW_RELASE_MINOR         0x6
5720 #define TG3_TSO_FW_RELEASE_FIX          0x0
5721 #define TG3_TSO_FW_START_ADDR           0x08000000
5722 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5723 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5724 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5725 #define TG3_TSO_FW_RODATA_LEN           0x60
5726 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5727 #define TG3_TSO_FW_DATA_LEN             0x30
5728 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5729 #define TG3_TSO_FW_SBSS_LEN             0x2c
5730 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5731 #define TG3_TSO_FW_BSS_LEN              0x894
5732
5733 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5734         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5735         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5736         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5737         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5738         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5739         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5740         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5741         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5742         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5743         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5744         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5745         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5746         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5747         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5748         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5749         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5750         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5751         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5752         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5753         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5754         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5755         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5756         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5757         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5758         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5759         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5760         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5761         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5762         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5763         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5764         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5765         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5766         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5767         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5768         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5769         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5770         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5771         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5772         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5773         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5774         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5775         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5776         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5777         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5778         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5779         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5780         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5781         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5782         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5783         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5784         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5785         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5786         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5787         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5788         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5789         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5790         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5791         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5792         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5793         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5794         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5795         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5796         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5797         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5798         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5799         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5800         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5801         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5802         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5803         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5804         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5805         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5806         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5807         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5808         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5809         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5810         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5811         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5812         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5813         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5814         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5815         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5816         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5817         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5818         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5819         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5820         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5821         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5822         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5823         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5824         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5825         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5826         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5827         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5828         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5829         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5830         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5831         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5832         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5833         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5834         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5835         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5836         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5837         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5838         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5839         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5840         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5841         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5842         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5843         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5844         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5845         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5846         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5847         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5848         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5849         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5850         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5851         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5852         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5853         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5854         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5855         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5856         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5857         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5858         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5859         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5860         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5861         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5862         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5863         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5864         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5865         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5866         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5867         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5868         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5869         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5870         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5871         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5872         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5873         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5874         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5875         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5876         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5877         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5878         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5879         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5880         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5881         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5882         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5883         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5884         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5885         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5886         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5887         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5888         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5889         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5890         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5891         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5892         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5893         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5894         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5895         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5896         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5897         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5898         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5899         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5900         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5901         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5902         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5903         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5904         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5905         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5906         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5907         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5908         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5909         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5910         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5911         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5912         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5913         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5914         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5915         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5916         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5917         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5918         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5919         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5920         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5921         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5922         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5923         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5924         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5925         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5926         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5927         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5928         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5929         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5930         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5931         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5932         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5933         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5934         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5935         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5936         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5937         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5938         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5939         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5940         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5941         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5942         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5943         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5944         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5945         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5946         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5947         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5948         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5949         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5950         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5951         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5952         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5953         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5954         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5955         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5956         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5957         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5958         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5959         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5960         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5961         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5962         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5963         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5964         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5965         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5966         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5967         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5968         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5969         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5970         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5971         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5972         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5973         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5974         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5975         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5976         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5977         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5978         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5979         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5980         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5981         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5982         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5983         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5984         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5985         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5986         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5987         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5988         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5989         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5990         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5991         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5992         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5993         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5994         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5995         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5996         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5997         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5998         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5999         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6000         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6001         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6002         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6003         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6004         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6005         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6006         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6007         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6008         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6009         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6010         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6011         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6012         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6013         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6014         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6015         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6016         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6017         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6018 };
6019
/* TSO firmware read-only data segment, downloaded to NIC SRAM at
 * TG3_TSO_FW_RODATA_ADDR by tg3_load_tso_firmware().  The words encode
 * ASCII tags used by the firmware ("MainCpuB", "stkoffldIn", "SwEvent0",
 * "fatalErr", ...).  Opaque firmware data — do not edit by hand.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
6027
/* TSO firmware initialized-data segment, downloaded to NIC SRAM at
 * TG3_TSO_FW_DATA_ADDR by tg3_load_tso_firmware().  Contains the ASCII
 * version tag "stkoffld_v1.6.0".  Opaque firmware data — do not edit.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
6033
/* 5705 needs a special version of the TSO firmware.  */
/* Firmware revision identifiers.
 * NOTE(review): "RELASE" in TG3_TSO5_FW_RELASE_MINOR is a misspelling of
 * "RELEASE"; left as-is in case the misspelled name is referenced
 * elsewhere in the file — confirm before renaming.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2
#define TG3_TSO5_FW_RELEASE_FIX         0x0
/* NIC SRAM layout of the 5705 TSO firmware image: load/entry address,
 * then base address and byte length of each segment (text, rodata, data,
 * sbss, bss).  These must match the tg3Tso5Fw* arrays below; the sums of
 * the segment lengths size the CPU scratch area in
 * tg3_load_tso_firmware().
 */
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
6049
/* TSO firmware instruction text for 5705-class chips, downloaded to NIC
 * SRAM at TG3_TSO5_FW_TEXT_ADDR and executed by the on-chip RX CPU (see
 * tg3_load_tso_firmware()).  Opaque firmware blob — do not edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6208
/* 5705 TSO firmware read-only data segment, downloaded to NIC SRAM at
 * TG3_TSO5_FW_RODATA_ADDR.  The words encode ASCII tags used by the
 * firmware ("MainCpuB", "stkoffld", "fatalErr", ...).  Opaque firmware
 * data — do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6215
/* 5705 TSO firmware initialized-data segment, downloaded to NIC SRAM at
 * TG3_TSO5_FW_DATA_ADDR.  Contains the ASCII version tag
 * "stkoffld_v1.2.0".  Opaque firmware data — do not edit by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6220
6221 /* tp->lock is held. */
6222 static int tg3_load_tso_firmware(struct tg3 *tp)
6223 {
6224         struct fw_info info;
6225         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6226         int err, i;
6227
6228         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6229                 return 0;
6230
6231         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6232                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6233                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6234                 info.text_data = &tg3Tso5FwText[0];
6235                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6236                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6237                 info.rodata_data = &tg3Tso5FwRodata[0];
6238                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6239                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6240                 info.data_data = &tg3Tso5FwData[0];
6241                 cpu_base = RX_CPU_BASE;
6242                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6243                 cpu_scratch_size = (info.text_len +
6244                                     info.rodata_len +
6245                                     info.data_len +
6246                                     TG3_TSO5_FW_SBSS_LEN +
6247                                     TG3_TSO5_FW_BSS_LEN);
6248         } else {
6249                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6250                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6251                 info.text_data = &tg3TsoFwText[0];
6252                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6253                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6254                 info.rodata_data = &tg3TsoFwRodata[0];
6255                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6256                 info.data_len = TG3_TSO_FW_DATA_LEN;
6257                 info.data_data = &tg3TsoFwData[0];
6258                 cpu_base = TX_CPU_BASE;
6259                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6260                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6261         }
6262
6263         err = tg3_load_firmware_cpu(tp, cpu_base,
6264                                     cpu_scratch_base, cpu_scratch_size,
6265                                     &info);
6266         if (err)
6267                 return err;
6268
6269         /* Now startup the cpu. */
6270         tw32(cpu_base + CPU_STATE, 0xffffffff);
6271         tw32_f(cpu_base + CPU_PC,    info.text_base);
6272
6273         for (i = 0; i < 5; i++) {
6274                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6275                         break;
6276                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6277                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6278                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6279                 udelay(1000);
6280         }
6281         if (i >= 5) {
6282                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6283                        "to set CPU PC, is %08x should be %08x\n",
6284                        tp->dev->name, tr32(cpu_base + CPU_PC),
6285                        info.text_base);
6286                 return -ENODEV;
6287         }
6288         tw32(cpu_base + CPU_STATE, 0xffffffff);
6289         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6290         return 0;
6291 }
6292
6293
6294 /* tp->lock is held. */
6295 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6296 {
6297         u32 addr_high, addr_low;
6298         int i;
6299
6300         addr_high = ((tp->dev->dev_addr[0] << 8) |
6301                      tp->dev->dev_addr[1]);
6302         addr_low = ((tp->dev->dev_addr[2] << 24) |
6303                     (tp->dev->dev_addr[3] << 16) |
6304                     (tp->dev->dev_addr[4] <<  8) |
6305                     (tp->dev->dev_addr[5] <<  0));
6306         for (i = 0; i < 4; i++) {
6307                 if (i == 1 && skip_mac_1)
6308                         continue;
6309                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6310                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6311         }
6312
6313         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6314             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6315                 for (i = 0; i < 12; i++) {
6316                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6317                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6318                 }
6319         }
6320
6321         addr_high = (tp->dev->dev_addr[0] +
6322                      tp->dev->dev_addr[1] +
6323                      tp->dev->dev_addr[2] +
6324                      tp->dev->dev_addr[3] +
6325                      tp->dev->dev_addr[4] +
6326                      tp->dev->dev_addr[5]) &
6327                 TX_BACKOFF_SEED_MASK;
6328         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6329 }
6330
6331 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6332 {
6333         struct tg3 *tp = netdev_priv(dev);
6334         struct sockaddr *addr = p;
6335         int err = 0, skip_mac_1 = 0;
6336
6337         if (!is_valid_ether_addr(addr->sa_data))
6338                 return -EINVAL;
6339
6340         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6341
6342         if (!netif_running(dev))
6343                 return 0;
6344
6345         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6346                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6347
6348                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6349                 addr0_low = tr32(MAC_ADDR_0_LOW);
6350                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6351                 addr1_low = tr32(MAC_ADDR_1_LOW);
6352
6353                 /* Skip MAC addr 1 if ASF is using it. */
6354                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6355                     !(addr1_high == 0 && addr1_low == 0))
6356                         skip_mac_1 = 1;
6357         }
6358         spin_lock_bh(&tp->lock);
6359         __tg3_set_mac_addr(tp, skip_mac_1);
6360         spin_unlock_bh(&tp->lock);
6361
6362         return err;
6363 }
6364
6365 /* tp->lock is held. */
6366 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6367                            dma_addr_t mapping, u32 maxlen_flags,
6368                            u32 nic_addr)
6369 {
6370         tg3_write_mem(tp,
6371                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6372                       ((u64) mapping >> 32));
6373         tg3_write_mem(tp,
6374                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6375                       ((u64) mapping & 0xffffffff));
6376         tg3_write_mem(tp,
6377                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6378                        maxlen_flags);
6379
6380         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6381                 tg3_write_mem(tp,
6382                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6383                               nic_addr);
6384 }
6385
6386 static void __tg3_set_rx_mode(struct net_device *);
6387 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6388 {
6389         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6390         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6391         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6392         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6393         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6394                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6395                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6396         }
6397         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6398         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6399         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6400                 u32 val = ec->stats_block_coalesce_usecs;
6401
6402                 if (!netif_carrier_ok(tp->dev))
6403                         val = 0;
6404
6405                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6406         }
6407 }
6408
6409 /* tp->lock is held. */
6410 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6411 {
6412         u32 val, rdmac_mode;
6413         int i, err, limit;
6414
6415         tg3_disable_ints(tp);
6416
6417         tg3_stop_fw(tp);
6418
6419         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6420
6421         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6422                 tg3_abort_hw(tp, 1);
6423         }
6424
6425         if (reset_phy)
6426                 tg3_phy_reset(tp);
6427
6428         err = tg3_chip_reset(tp);
6429         if (err)
6430                 return err;
6431
6432         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6433
6434         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6435             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6436                 val = tr32(TG3_CPMU_CTRL);
6437                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6438                 tw32(TG3_CPMU_CTRL, val);
6439
6440                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6441                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6442                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6443                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6444
6445                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6446                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6447                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6448                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6449
6450                 val = tr32(TG3_CPMU_HST_ACC);
6451                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6452                 val |= CPMU_HST_ACC_MACCLK_6_25;
6453                 tw32(TG3_CPMU_HST_ACC, val);
6454         }
6455
6456         /* This works around an issue with Athlon chipsets on
6457          * B3 tigon3 silicon.  This bit has no effect on any
6458          * other revision.  But do not set this on PCI Express
6459          * chips and don't even touch the clocks if the CPMU is present.
6460          */
6461         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6462                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6463                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6464                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6465         }
6466
6467         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6468             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6469                 val = tr32(TG3PCI_PCISTATE);
6470                 val |= PCISTATE_RETRY_SAME_DMA;
6471                 tw32(TG3PCI_PCISTATE, val);
6472         }
6473
6474         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6475                 /* Allow reads and writes to the
6476                  * APE register and memory space.
6477                  */
6478                 val = tr32(TG3PCI_PCISTATE);
6479                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6480                        PCISTATE_ALLOW_APE_SHMEM_WR;
6481                 tw32(TG3PCI_PCISTATE, val);
6482         }
6483
6484         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6485                 /* Enable some hw fixes.  */
6486                 val = tr32(TG3PCI_MSI_DATA);
6487                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6488                 tw32(TG3PCI_MSI_DATA, val);
6489         }
6490
6491         /* Descriptor ring init may make accesses to the
6492          * NIC SRAM area to setup the TX descriptors, so we
6493          * can only do this after the hardware has been
6494          * successfully reset.
6495          */
6496         err = tg3_init_rings(tp);
6497         if (err)
6498                 return err;
6499
6500         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6501             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6502                 /* This value is determined during the probe time DMA
6503                  * engine test, tg3_test_dma.
6504                  */
6505                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6506         }
6507
6508         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6509                           GRC_MODE_4X_NIC_SEND_RINGS |
6510                           GRC_MODE_NO_TX_PHDR_CSUM |
6511                           GRC_MODE_NO_RX_PHDR_CSUM);
6512         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6513
6514         /* Pseudo-header checksum is done by hardware logic and not
6515          * the offload processers, so make the chip do the pseudo-
6516          * header checksums on receive.  For transmit it is more
6517          * convenient to do the pseudo-header checksum in software
6518          * as Linux does that on transmit for us in all cases.
6519          */
6520         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6521
6522         tw32(GRC_MODE,
6523              tp->grc_mode |
6524              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6525
6526         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6527         val = tr32(GRC_MISC_CFG);
6528         val &= ~0xff;
6529         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6530         tw32(GRC_MISC_CFG, val);
6531
6532         /* Initialize MBUF/DESC pool. */
6533         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6534                 /* Do nothing.  */
6535         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6536                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6537                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6538                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6539                 else
6540                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6541                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6542                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6543         }
6544         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6545                 int fw_len;
6546
6547                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6548                           TG3_TSO5_FW_RODATA_LEN +
6549                           TG3_TSO5_FW_DATA_LEN +
6550                           TG3_TSO5_FW_SBSS_LEN +
6551                           TG3_TSO5_FW_BSS_LEN);
6552                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6553                 tw32(BUFMGR_MB_POOL_ADDR,
6554                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6555                 tw32(BUFMGR_MB_POOL_SIZE,
6556                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6557         }
6558
6559         if (tp->dev->mtu <= ETH_DATA_LEN) {
6560                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6561                      tp->bufmgr_config.mbuf_read_dma_low_water);
6562                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6563                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6564                 tw32(BUFMGR_MB_HIGH_WATER,
6565                      tp->bufmgr_config.mbuf_high_water);
6566         } else {
6567                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6568                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6569                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6570                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6571                 tw32(BUFMGR_MB_HIGH_WATER,
6572                      tp->bufmgr_config.mbuf_high_water_jumbo);
6573         }
6574         tw32(BUFMGR_DMA_LOW_WATER,
6575              tp->bufmgr_config.dma_low_water);
6576         tw32(BUFMGR_DMA_HIGH_WATER,
6577              tp->bufmgr_config.dma_high_water);
6578
6579         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6580         for (i = 0; i < 2000; i++) {
6581                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6582                         break;
6583                 udelay(10);
6584         }
6585         if (i >= 2000) {
6586                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6587                        tp->dev->name);
6588                 return -ENODEV;
6589         }
6590
6591         /* Setup replenish threshold. */
6592         val = tp->rx_pending / 8;
6593         if (val == 0)
6594                 val = 1;
6595         else if (val > tp->rx_std_max_post)
6596                 val = tp->rx_std_max_post;
6597         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6598                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6599                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6600
6601                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6602                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6603         }
6604
6605         tw32(RCVBDI_STD_THRESH, val);
6606
6607         /* Initialize TG3_BDINFO's at:
6608          *  RCVDBDI_STD_BD:     standard eth size rx ring
6609          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6610          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6611          *
6612          * like so:
6613          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6614          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6615          *                              ring attribute flags
6616          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6617          *
6618          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6619          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6620          *
6621          * The size of each ring is fixed in the firmware, but the location is
6622          * configurable.
6623          */
6624         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6625              ((u64) tp->rx_std_mapping >> 32));
6626         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6627              ((u64) tp->rx_std_mapping & 0xffffffff));
6628         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6629              NIC_SRAM_RX_BUFFER_DESC);
6630
6631         /* Don't even try to program the JUMBO/MINI buffer descriptor
6632          * configs on 5705.
6633          */
6634         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6635                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6636                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6637         } else {
6638                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6639                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6640
6641                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6642                      BDINFO_FLAGS_DISABLED);
6643
6644                 /* Setup replenish threshold. */
6645                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6646
6647                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6648                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6649                              ((u64) tp->rx_jumbo_mapping >> 32));
6650                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6651                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6652                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6653                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6654                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6655                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6656                 } else {
6657                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6658                              BDINFO_FLAGS_DISABLED);
6659                 }
6660
6661         }
6662
6663         /* There is only one send ring on 5705/5750, no need to explicitly
6664          * disable the others.
6665          */
6666         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6667                 /* Clear out send RCB ring in SRAM. */
6668                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6669                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6670                                       BDINFO_FLAGS_DISABLED);
6671         }
6672
6673         tp->tx_prod = 0;
6674         tp->tx_cons = 0;
6675         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6676         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6677
6678         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6679                        tp->tx_desc_mapping,
6680                        (TG3_TX_RING_SIZE <<
6681                         BDINFO_FLAGS_MAXLEN_SHIFT),
6682                        NIC_SRAM_TX_BUFFER_DESC);
6683
6684         /* There is only one receive return ring on 5705/5750, no need
6685          * to explicitly disable the others.
6686          */
6687         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6688                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6689                      i += TG3_BDINFO_SIZE) {
6690                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6691                                       BDINFO_FLAGS_DISABLED);
6692                 }
6693         }
6694
6695         tp->rx_rcb_ptr = 0;
6696         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6697
6698         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6699                        tp->rx_rcb_mapping,
6700                        (TG3_RX_RCB_RING_SIZE(tp) <<
6701                         BDINFO_FLAGS_MAXLEN_SHIFT),
6702                        0);
6703
6704         tp->rx_std_ptr = tp->rx_pending;
6705         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6706                      tp->rx_std_ptr);
6707
6708         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6709                                                 tp->rx_jumbo_pending : 0;
6710         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6711                      tp->rx_jumbo_ptr);
6712
6713         /* Initialize MAC address and backoff seed. */
6714         __tg3_set_mac_addr(tp, 0);
6715
6716         /* MTU + ethernet header + FCS + optional VLAN tag */
6717         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6718
6719         /* The slot time is changed by tg3_setup_phy if we
6720          * run at gigabit with half duplex.
6721          */
6722         tw32(MAC_TX_LENGTHS,
6723              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6724              (6 << TX_LENGTHS_IPG_SHIFT) |
6725              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6726
6727         /* Receive rules. */
6728         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6729         tw32(RCVLPC_CONFIG, 0x0181);
6730
6731         /* Calculate RDMAC_MODE setting early, we need it to determine
6732          * the RCVLPC_STATE_ENABLE mask.
6733          */
6734         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6735                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6736                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6737                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6738                       RDMAC_MODE_LNGREAD_ENAB);
6739
6740         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6741                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6742                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6743                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6744
6745         /* If statement applies to 5705 and 5750 PCI devices only */
6746         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6747              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6748             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6749                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6750                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6751                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6752                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6753                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6754                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6755                 }
6756         }
6757
6758         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6759                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6760
6761         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6762                 rdmac_mode |= (1 << 27);
6763
6764         /* Receive/send statistics. */
6765         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6766                 val = tr32(RCVLPC_STATS_ENABLE);
6767                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6768                 tw32(RCVLPC_STATS_ENABLE, val);
6769         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6770                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6771                 val = tr32(RCVLPC_STATS_ENABLE);
6772                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6773                 tw32(RCVLPC_STATS_ENABLE, val);
6774         } else {
6775                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6776         }
6777         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6778         tw32(SNDDATAI_STATSENAB, 0xffffff);
6779         tw32(SNDDATAI_STATSCTRL,
6780              (SNDDATAI_SCTRL_ENABLE |
6781               SNDDATAI_SCTRL_FASTUPD));
6782
6783         /* Setup host coalescing engine. */
6784         tw32(HOSTCC_MODE, 0);
6785         for (i = 0; i < 2000; i++) {
6786                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6787                         break;
6788                 udelay(10);
6789         }
6790
6791         __tg3_set_coalesce(tp, &tp->coal);
6792
6793         /* set status block DMA address */
6794         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6795              ((u64) tp->status_mapping >> 32));
6796         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6797              ((u64) tp->status_mapping & 0xffffffff));
6798
6799         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6800                 /* Status/statistics block address.  See tg3_timer,
6801                  * the tg3_periodic_fetch_stats call there, and
6802                  * tg3_get_stats to see how this works for 5705/5750 chips.
6803                  */
6804                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6805                      ((u64) tp->stats_mapping >> 32));
6806                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6807                      ((u64) tp->stats_mapping & 0xffffffff));
6808                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6809                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6810         }
6811
6812         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6813
6814         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6815         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6816         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6817                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6818
6819         /* Clear statistics/status block in chip, and status block in ram. */
6820         for (i = NIC_SRAM_STATS_BLK;
6821              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6822              i += sizeof(u32)) {
6823                 tg3_write_mem(tp, i, 0);
6824                 udelay(40);
6825         }
6826         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6827
6828         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6829                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6830                 /* reset to prevent losing 1st rx packet intermittently */
6831                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6832                 udelay(10);
6833         }
6834
6835         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6836                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6837         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6838             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6839             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6840                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6841         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6842         udelay(40);
6843
6844         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6845          * If TG3_FLG2_IS_NIC is zero, we should read the
6846          * register to preserve the GPIO settings for LOMs. The GPIOs,
6847          * whether used as inputs or outputs, are set by boot code after
6848          * reset.
6849          */
6850         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6851                 u32 gpio_mask;
6852
6853                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6854                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6855                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6856
6857                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6858                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6859                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6860
6861                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6862                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6863
6864                 tp->grc_local_ctrl &= ~gpio_mask;
6865                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6866
6867                 /* GPIO1 must be driven high for eeprom write protect */
6868                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6869                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6870                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6871         }
6872         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6873         udelay(100);
6874
6875         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6876         tp->last_tag = 0;
6877
6878         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6879                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6880                 udelay(40);
6881         }
6882
6883         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6884                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6885                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6886                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6887                WDMAC_MODE_LNGREAD_ENAB);
6888
6889         /* If statement applies to 5705 and 5750 PCI devices only */
6890         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6891              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6892             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6893                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6894                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6895                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6896                         /* nothing */
6897                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6898                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6899                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6900                         val |= WDMAC_MODE_RX_ACCEL;
6901                 }
6902         }
6903
6904         /* Enable host coalescing bug fix */
6905         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6906             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6907             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6908             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6909                 val |= (1 << 29);
6910
6911         tw32_f(WDMAC_MODE, val);
6912         udelay(40);
6913
6914         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6915                 u16 pcix_cmd;
6916
6917                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6918                                      &pcix_cmd);
6919                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6920                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6921                         pcix_cmd |= PCI_X_CMD_READ_2K;
6922                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6923                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6924                         pcix_cmd |= PCI_X_CMD_READ_2K;
6925                 }
6926                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6927                                       pcix_cmd);
6928         }
6929
6930         tw32_f(RDMAC_MODE, rdmac_mode);
6931         udelay(40);
6932
6933         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6934         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6935                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6936
6937         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6938                 tw32(SNDDATAC_MODE,
6939                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6940         else
6941                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6942
6943         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6944         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6945         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6946         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6947         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6948                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6949         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6950         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6951
6952         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6953                 err = tg3_load_5701_a0_firmware_fix(tp);
6954                 if (err)
6955                         return err;
6956         }
6957
6958         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6959                 err = tg3_load_tso_firmware(tp);
6960                 if (err)
6961                         return err;
6962         }
6963
6964         tp->tx_mode = TX_MODE_ENABLE;
6965         tw32_f(MAC_TX_MODE, tp->tx_mode);
6966         udelay(100);
6967
6968         tp->rx_mode = RX_MODE_ENABLE;
6969         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6970             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6971                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6972
6973         tw32_f(MAC_RX_MODE, tp->rx_mode);
6974         udelay(10);
6975
6976         if (tp->link_config.phy_is_low_power) {
6977                 tp->link_config.phy_is_low_power = 0;
6978                 tp->link_config.speed = tp->link_config.orig_speed;
6979                 tp->link_config.duplex = tp->link_config.orig_duplex;
6980                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6981         }
6982
6983         tp->mi_mode = MAC_MI_MODE_BASE;
6984         tw32_f(MAC_MI_MODE, tp->mi_mode);
6985         udelay(80);
6986
6987         tw32(MAC_LED_CTRL, tp->led_ctrl);
6988
6989         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6990         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6991                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6992                 udelay(10);
6993         }
6994         tw32_f(MAC_RX_MODE, tp->rx_mode);
6995         udelay(10);
6996
6997         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6998                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6999                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7000                         /* Set drive transmission level to 1.2V  */
7001                         /* only if the signal pre-emphasis bit is not set  */
7002                         val = tr32(MAC_SERDES_CFG);
7003                         val &= 0xfffff000;
7004                         val |= 0x880;
7005                         tw32(MAC_SERDES_CFG, val);
7006                 }
7007                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7008                         tw32(MAC_SERDES_CFG, 0x616000);
7009         }
7010
7011         /* Prevent chip from dropping frames when flow control
7012          * is enabled.
7013          */
7014         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7015
7016         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7017             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7018                 /* Use hardware link auto-negotiation */
7019                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7020         }
7021
7022         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7023             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7024                 u32 tmp;
7025
7026                 tmp = tr32(SERDES_RX_CTRL);
7027                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7028                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7029                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7030                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7031         }
7032
7033         err = tg3_setup_phy(tp, 0);
7034         if (err)
7035                 return err;
7036
7037         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7038             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7039                 u32 tmp;
7040
7041                 /* Clear CRC stats. */
7042                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7043                         tg3_writephy(tp, MII_TG3_TEST1,
7044                                      tmp | MII_TG3_TEST1_CRC_EN);
7045                         tg3_readphy(tp, 0x14, &tmp);
7046                 }
7047         }
7048
7049         __tg3_set_rx_mode(tp->dev);
7050
7051         /* Initialize receive rules. */
7052         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7053         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7054         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7055         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7056
7057         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7058             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7059                 limit = 8;
7060         else
7061                 limit = 16;
7062         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7063                 limit -= 4;
7064         switch (limit) {
7065         case 16:
7066                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7067         case 15:
7068                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7069         case 14:
7070                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7071         case 13:
7072                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7073         case 12:
7074                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7075         case 11:
7076                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7077         case 10:
7078                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7079         case 9:
7080                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7081         case 8:
7082                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7083         case 7:
7084                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7085         case 6:
7086                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7087         case 5:
7088                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7089         case 4:
7090                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7091         case 3:
7092                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7093         case 2:
7094         case 1:
7095
7096         default:
7097                 break;
7098         };
7099
7100         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7101                 /* Write our heartbeat update interval to APE. */
7102                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7103                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7104
7105         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7106
7107         return 0;
7108 }
7109
7110 /* Called at device open time to get the chip ready for
7111  * packet processing.  Invoked with tp->lock held.
7112  */
7113 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7114 {
7115         int err;
7116
7117         /* Force the chip into D0. */
7118         err = tg3_set_power_state(tp, PCI_D0);
7119         if (err)
7120                 goto out;
7121
7122         tg3_switch_clocks(tp);
7123
7124         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7125
7126         err = tg3_reset_hw(tp, reset_phy);
7127
7128 out:
7129         return err;
7130 }
7131
/* Accumulate the 32-bit hardware statistics register REG into the 64-bit
 * software counter PSTAT.  The register value is added to the low word;
 * if the low word wrapped (new value smaller than the amount just added),
 * carry one into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
7138
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement statistics
 * registers into the 64-bit counters in tp->hw_stats.  Called once per
 * second from tg3_timer() on 5705+ chips.  Skipped while the link is
 * down, since the counters cannot advance without a carrier.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        if (!netif_carrier_ok(tp->dev))
                return;

        /* Transmit-side MAC statistics. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* Receive-side MAC statistics. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        /* Receive list placement statistics. */
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7179
/* Driver watchdog timer, re-armed every tp->timer_offset jiffies (set up
 * in tg3_open()).  Under tp->lock it:
 *   - works around the non-tagged-status IRQ race and detects a stalled
 *     write DMA engine (scheduling a full chip reset if so);
 *   - once per second, fetches statistics and polls link state;
 *   - once every two seconds, sends the ASF heartbeat to the firmware.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* An IRQ synchronization is in progress; don't touch the
         * hardware, just re-arm ourselves.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block updated but no IRQ seen yet:
                         * force an interrupt to be generated.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Kick host coalescing to refresh the status
                         * block immediately.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        /* Write DMA engine is no longer enabled -- the
                         * chip has likely hung.  Schedule a full reset
                         * and let the reset task restart this timer.
                         */
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        /* Poll MAC_STATUS for link/PHY change events. */
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Link was up and the state changed, or link was
                         * down and the SERDES now sees sync/signal: a
                         * PHY setup pass is needed either way.
                         */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Bounce the port mode bits to
                                         * reset the link state machine.
                                         */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                        u32 val;

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
                        /* Ring the firmware's doorbell (bit 14) to
                         * signal the new mailbox command.
                         */
                        val = tr32(GRC_RX_CPU_EVENT);
                        val |= (1 << 14);
                        tw32(GRC_RX_CPU_EVENT, val);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
7299
7300 static int tg3_request_irq(struct tg3 *tp)
7301 {
7302         irq_handler_t fn;
7303         unsigned long flags;
7304         struct net_device *dev = tp->dev;
7305
7306         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7307                 fn = tg3_msi;
7308                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7309                         fn = tg3_msi_1shot;
7310                 flags = IRQF_SAMPLE_RANDOM;
7311         } else {
7312                 fn = tg3_interrupt;
7313                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7314                         fn = tg3_interrupt_tagged;
7315                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7316         }
7317         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7318 }
7319
/* Verify that the chip can actually deliver an interrupt.
 *
 * Temporarily swaps in the tg3_test_isr handler, forces a status block
 * update via host coalescing, and polls for up to ~50ms for either a
 * non-zero interrupt mailbox or masked PCI interrupts (evidence the ISR
 * ran).  The normal handler is restored before returning.
 *
 * Returns 0 if an interrupt was observed, -ENODEV if the device is not
 * running, or a negative errno / -EIO on failure.
 *
 * NOTE(review): if the request_irq() of the test ISR fails, we return
 * with interrupts disabled and no handler installed -- the caller
 * (tg3_test_msi) treats any error as fatal for MSI, but this path
 * presumably relies on the caller to recover; verify against callers.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        /* Swap the normal handler for the test ISR. */
        free_irq(tp->pdev->irq, dev);

        err = request_irq(tp->pdev->irq, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
        if (err)
                return err;

        tp->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Force host coalescing to update the status block now, which
         * should trigger an interrupt.
         */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               HOSTCC_MODE_NOW);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
                                        TG3_64BIT_REG_LOW);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                /* Either observation means the test ISR fired. */
                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                msleep(10);
        }

        tg3_disable_ints(tp);

        /* Restore the normal interrupt handler. */
        free_irq(tp->pdev->irq, dev);

        err = tg3_request_irq(tp);

        if (err)
                return err;

        if (intr_ok)
                return 0;

        return -EIO;
}
7373
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Runs tg3_test_interrupt() while MSI is active.  If no interrupt is
 * seen (-EIO), falls back to INTx: disables MSI, re-requests the IRQ,
 * and resets the chip in case the failed MSI cycle ended with a Master
 * Abort.  Any other error is returned as-is.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test when not using MSI. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the original PCI command word (SERR bit included). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        /* Chip re-init failed: drop the IRQ we just acquired. */
        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
7434
/* net_device open callback: bring the interface up.
 *
 * Sequence: power the chip to D0, allocate DMA-consistent rings, enable
 * MSI if supported, request the IRQ, initialize the hardware, set up the
 * watchdog timer, verify MSI delivery, then enable interrupts and start
 * the TX queue.  Every failure path unwinds whatever was set up before it.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        netif_carrier_off(tp->dev);

        tg3_full_lock(tp, 0);

        err = tg3_set_power_state(tp, PCI_D0);
        if (err) {
                tg3_full_unlock(tp);
                return err;
        }

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                /* IRQ request failed: undo MSI enable and free rings. */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        napi_enable(&tp->napi);

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* Tagged status allows a slow (1s) watchdog; otherwise
                 * poll at 10Hz to cover the non-tagged IRQ races.
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                /* ASF heartbeat fires at half the once-per-second rate. */
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                /* Hardware init failed: unwind NAPI, IRQ, MSI, rings. */
                napi_disable(&tp->napi);
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                /* Confirm MSI actually delivers interrupts; on failure
                 * tg3_test_msi() falls back to INTx or errors out.
                 */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        napi_disable(&tp->napi);

                        return err;
                }

                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                u32 val = tr32(PCIE_TRANSACTION_CFG);

                                tw32(PCIE_TRANSACTION_CFG,
                                     val | PCIE_TRANS_CFG_1SHOT_MSI);
                        }
                }
        }

        tg3_full_lock(tp, 0);

        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
7570
#if 0
/* Debug-only dump of chip registers, SRAM control blocks, status/stats
 * blocks, mailboxes, and NIC-side TX/RX descriptors.  Compiled out; can
 * be re-enabled (together with the call site in tg3_close) for bring-up
 * debugging.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;
        int i;

        pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
        printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
               val16, val32);

        /* MAC block */
        printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
               tr32(MAC_MODE), tr32(MAC_STATUS));
        printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
               tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
        printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
               tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
        printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

        /* Send data initiator control block */
        printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
               tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
        printk("       SNDDATAI_STATSCTRL[%08x]\n",
               tr32(SNDDATAI_STATSCTRL));

        /* Send data completion control block */
        printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

        /* Send BD ring selector block */
        printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
               tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

        /* Send BD initiator control block */
        printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
               tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

        /* Send BD completion control block */
        printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

        /* Receive list placement control block */
        printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
               tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
        printk("       RCVLPC_STATSCTRL[%08x]\n",
               tr32(RCVLPC_STATSCTRL));

        /* Receive data and receive BD initiator control block */
        printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
               tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

        /* Receive data completion control block */
        printk("DEBUG: RCVDCC_MODE[%08x]\n",
               tr32(RCVDCC_MODE));

        /* Receive BD initiator control block */
        printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
               tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

        /* Receive BD completion control block */
        printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
               tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

        /* Receive list selector control block */
        printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
               tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

        /* Mbuf cluster free block */
        printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
               tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

        /* Host coalescing control block */
        printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
               tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
        printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATS_BLK_NIC_ADDR));
        printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

        /* Memory arbiter control block */
        printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
               tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

        /* Buffer manager control block */
        printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
               tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
        printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
        printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
               "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_DMA_DESC_POOL_ADDR),
               tr32(BUFMGR_DMA_DESC_POOL_SIZE));

        /* Read DMA control block */
        printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
               tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

        /* Write DMA control block */
        printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
               tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

        /* DMA completion block */
        printk("DEBUG: DMAC_MODE[%08x]\n",
               tr32(DMAC_MODE));

        /* GRC block */
        printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
               tr32(GRC_MODE), tr32(GRC_MISC_CFG));
        printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
               tr32(GRC_LOCAL_CTRL));

        /* TG3_BDINFOs */
        printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_JUMBO_BD + 0x0),
               tr32(RCVDBDI_JUMBO_BD + 0x4),
               tr32(RCVDBDI_JUMBO_BD + 0x8),
               tr32(RCVDBDI_JUMBO_BD + 0xc));
        printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_STD_BD + 0x0),
               tr32(RCVDBDI_STD_BD + 0x4),
               tr32(RCVDBDI_STD_BD + 0x8),
               tr32(RCVDBDI_STD_BD + 0xc));
        printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_MINI_BD + 0x0),
               tr32(RCVDBDI_MINI_BD + 0x4),
               tr32(RCVDBDI_MINI_BD + 0x8),
               tr32(RCVDBDI_MINI_BD + 0xc));

        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
        printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4, val32_5);

        /* SW status block */
        printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
               tp->hw_status->status,
               tp->hw_status->status_tag,
               tp->hw_status->rx_jumbo_consumer,
               tp->hw_status->rx_consumer,
               tp->hw_status->rx_mini_consumer,
               tp->hw_status->idx[0].rx_producer,
               tp->hw_status->idx[0].tx_consumer);

        /* SW statistics block */
        printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
               ((u32 *)tp->hw_stats)[0],
               ((u32 *)tp->hw_stats)[1],
               ((u32 *)tp->hw_stats)[2],
               ((u32 *)tp->hw_stats)[3]);

        /* Mailboxes */
        printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

        /* NIC side send descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long txd;

                txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
                        + (i * sizeof(struct tg3_tx_buffer_desc));
                printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(txd + 0x0), readl(txd + 0x4),
                       readl(txd + 0x8), readl(txd + 0xc));
        }

        /* NIC side RX descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }

        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }
}
#endif
7798
7799 static struct net_device_stats *tg3_get_stats(struct net_device *);
7800 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7801
/* net_device stop callback: bring the interface down.
 *
 * Teardown order matters: NAPI and the pending reset work are stopped
 * first so nothing restarts the hardware, then the queue and watchdog
 * timer, then (under the full lock) interrupts are disabled and the chip
 * halted, and finally the IRQ, MSI, DMA memory, and power state are
 * released.  Final statistics are snapshotted into the *_prev copies so
 * they survive the freeing of hw_stats.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        napi_disable(&tp->napi);
        /* Make sure a queued reset task is not running concurrently. */
        cancel_work_sync(&tp->reset_task);

        netif_stop_queue(dev);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
#if 0
        tg3_dump_state(tp);
#endif

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Preserve final counters before hw_stats memory is freed. */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
7845
7846 static inline unsigned long get_stat64(tg3_stat64_t *val)
7847 {
7848         unsigned long ret;
7849
7850 #if (BITS_PER_LONG == 32)
7851         ret = val->low;
7852 #else
7853         ret = ((u64)val->high << 32) | ((u64)val->low);
7854 #endif
7855         return ret;
7856 }
7857
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 with a copper PHY the counter is read from the PHY
 * itself: CRC counting is enabled via MII_TG3_TEST1 and the count
 * fetched from PHY register 0x14 (presumably the PHY's CRC error
 * counter -- matches the MII_TG3_TEST1_CRC_EN usage; confirm against
 * the PHY datasheet), accumulated into tp->phy_crc_errors.  All other
 * chips use the MAC's rx_fcs_errors hardware statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
7883
/* Set the running ethtool statistic 'member' to the value saved at the
 * last close (old_estats) plus the current 64-bit hardware counter.
 * Expects 'estats', 'old_estats' and 'hw_stats' in the caller's scope.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)
7887
7888 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7889 {
7890         struct tg3_ethtool_stats *estats = &tp->estats;
7891         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7892         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7893
7894         if (!hw_stats)
7895                 return old_estats;
7896
7897         ESTAT_ADD(rx_octets);
7898         ESTAT_ADD(rx_fragments);
7899         ESTAT_ADD(rx_ucast_packets);
7900         ESTAT_ADD(rx_mcast_packets);
7901         ESTAT_ADD(rx_bcast_packets);
7902         ESTAT_ADD(rx_fcs_errors);
7903         ESTAT_ADD(rx_align_errors);
7904         ESTAT_ADD(rx_xon_pause_rcvd);
7905         ESTAT_ADD(rx_xoff_pause_rcvd);
7906         ESTAT_ADD(rx_mac_ctrl_rcvd);
7907         ESTAT_ADD(rx_xoff_entered);
7908         ESTAT_ADD(rx_frame_too_long_errors);
7909         ESTAT_ADD(rx_jabbers);
7910         ESTAT_ADD(rx_undersize_packets);
7911         ESTAT_ADD(rx_in_length_errors);
7912         ESTAT_ADD(rx_out_length_errors);
7913         ESTAT_ADD(rx_64_or_less_octet_packets);
7914         ESTAT_ADD(rx_65_to_127_octet_packets);
7915         ESTAT_ADD(rx_128_to_255_octet_packets);
7916         ESTAT_ADD(rx_256_to_511_octet_packets);
7917         ESTAT_ADD(rx_512_to_1023_octet_packets);
7918         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7919         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7920         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7921         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7922         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7923
7924         ESTAT_ADD(tx_octets);
7925         ESTAT_ADD(tx_collisions);
7926         ESTAT_ADD(tx_xon_sent);
7927         ESTAT_ADD(tx_xoff_sent);
7928         ESTAT_ADD(tx_flow_control);
7929         ESTAT_ADD(tx_mac_errors);
7930         ESTAT_ADD(tx_single_collisions);
7931         ESTAT_ADD(tx_mult_collisions);
7932         ESTAT_ADD(tx_deferred);
7933         ESTAT_ADD(tx_excessive_collisions);
7934         ESTAT_ADD(tx_late_collisions);
7935         ESTAT_ADD(tx_collide_2times);
7936         ESTAT_ADD(tx_collide_3times);
7937         ESTAT_ADD(tx_collide_4times);
7938         ESTAT_ADD(tx_collide_5times);
7939         ESTAT_ADD(tx_collide_6times);
7940         ESTAT_ADD(tx_collide_7times);
7941         ESTAT_ADD(tx_collide_8times);
7942         ESTAT_ADD(tx_collide_9times);
7943         ESTAT_ADD(tx_collide_10times);
7944         ESTAT_ADD(tx_collide_11times);
7945         ESTAT_ADD(tx_collide_12times);
7946         ESTAT_ADD(tx_collide_13times);
7947         ESTAT_ADD(tx_collide_14times);
7948         ESTAT_ADD(tx_collide_15times);
7949         ESTAT_ADD(tx_ucast_packets);
7950         ESTAT_ADD(tx_mcast_packets);
7951         ESTAT_ADD(tx_bcast_packets);
7952         ESTAT_ADD(tx_carrier_sense_errors);
7953         ESTAT_ADD(tx_discards);
7954         ESTAT_ADD(tx_errors);
7955
7956         ESTAT_ADD(dma_writeq_full);
7957         ESTAT_ADD(dma_write_prioq_full);
7958         ESTAT_ADD(rxbds_empty);
7959         ESTAT_ADD(rx_discards);
7960         ESTAT_ADD(rx_errors);
7961         ESTAT_ADD(rx_threshold_hit);
7962
7963         ESTAT_ADD(dma_readq_full);
7964         ESTAT_ADD(dma_read_prioq_full);
7965         ESTAT_ADD(tx_comp_queue_full);
7966
7967         ESTAT_ADD(ring_set_send_prod_index);
7968         ESTAT_ADD(ring_status_update);
7969         ESTAT_ADD(nic_irqs);
7970         ESTAT_ADD(nic_avoided_irqs);
7971         ESTAT_ADD(nic_tx_threshold_hit);
7972
7973         return estats;
7974 }
7975
/* netdev get_stats hook: build the standard net_device_stats view by
 * combining the counters saved at the last close (net_stats_prev, see
 * tg3_close) with the live hardware statistics block.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        struct net_device_stats *stats = &tp->net_stats;
        struct net_device_stats *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        /* No live statistics block: return the saved snapshot. */
        if (!hw_stats)
                return old_stats;

        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY on 5700/5701 copper parts
         * (see calc_crc_errors).
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        return stats;
}
8035
/* Bitwise little-endian CRC-32 (polynomial 0xedb88320) over buf[0..len),
 * seeded with all ones and inverted on return.  Used below to derive
 * the multicast hash filter bit for each address.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
        u32 crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];

                /* Process one bit per iteration, LSB first. */
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 0x01) ? 0xedb88320 : 0);
        }

        return ~crc;
}
8060
8061 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8062 {
8063         /* accept or reject all multicast frames */
8064         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8065         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8066         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8067         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8068 }
8069
/* Compute and program the RX filtering state -- promiscuous flag, VLAN
 * tag retention, and the 128-bit multicast hash filter -- from
 * dev->flags and the device's multicast list.  Callers take
 * tg3_full_lock() first (see tg3_set_rx_mode).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
#if TG3_VLAN_TAG_USED
        if (!tp->vlgrp &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
        /* By definition, VLAN is disabled always in this
         * case.
         */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi (tp, 1);
        } else if (dev->mc_count < 1) {
                /* Reject all multicast. */
                tg3_set_multi (tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                unsigned int i;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* The low 7 bits of the inverted CRC select one
                         * of 128 hash filter bits: bits 6:5 pick the
                         * register, bits 4:0 the bit within it.
                         */
                        crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch the RX mode register if something changed. */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
8133
/* ndo set_rx_mode hook: apply the new RX filtering configuration under
 * the full driver lock.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_rx_mode(dev);
                tg3_full_unlock(tp);
        }
}
8145
8146 #define TG3_REGDUMP_LEN         (32 * 1024)
8147
/* ethtool get_regs_len: the register dump is a fixed 32KB window. */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REGDUMP_LEN;
}
8152
/* ethtool get_regs: dump a sparse selection of device registers into a
 * 32KB buffer, each register at its own hardware offset; gaps are left
 * zero-filled.  Skipped entirely while the PHY is powered down.
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p;
        struct tg3 *tp = netdev_priv(dev);
        u8 *orig_p = _p;
        int i;

        regs->version = 0;

        memset(p, 0, TG3_REGDUMP_LEN);

        if (tp->link_config.phy_is_low_power)
                return;

        tg3_full_lock(tp, 0);

/* __GET_REG32 stores one register at the current output cursor 'p';
 * the LOOP/1 helpers first reposition the cursor so each register
 * lands at its own offset within the dump buffer.
 */
#define __GET_REG32(reg)        (*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)                \
do {    p = (u32 *)(orig_p + (base));           \
        for (i = 0; i < len; i += 4)            \
                __GET_REG32((base) + i);        \
} while (0)
#define GET_REG32_1(reg)                        \
do {    p = (u32 *)(orig_p + (reg));            \
        __GET_REG32((reg));                     \
} while (0)

        GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
        GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
        GET_REG32_LOOP(MAC_MODE, 0x4f0);
        GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
        GET_REG32_1(SNDDATAC_MODE);
        GET_REG32_LOOP(SNDBDS_MODE, 0x80);
        GET_REG32_LOOP(SNDBDI_MODE, 0x48);
        GET_REG32_1(SNDBDC_MODE);
        GET_REG32_LOOP(RCVLPC_MODE, 0x20);
        GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
        GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
        GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
        GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
        GET_REG32_1(RCVDCC_MODE);
        GET_REG32_LOOP(RCVBDI_MODE, 0x20);
        GET_REG32_LOOP(RCVCC_MODE, 0x14);
        GET_REG32_LOOP(RCVLSC_MODE, 0x08);
        GET_REG32_1(MBFREE_MODE);
        GET_REG32_LOOP(HOSTCC_MODE, 0x100);
        GET_REG32_LOOP(MEMARB_MODE, 0x10);
        GET_REG32_LOOP(BUFMGR_MODE, 0x58);
        GET_REG32_LOOP(RDMAC_MODE, 0x08);
        GET_REG32_LOOP(WDMAC_MODE, 0x08);
        GET_REG32_1(RX_CPU_MODE);
        GET_REG32_1(RX_CPU_STATE);
        GET_REG32_1(RX_CPU_PGMCTR);
        GET_REG32_1(RX_CPU_HWBKPT);
        GET_REG32_1(TX_CPU_MODE);
        GET_REG32_1(TX_CPU_STATE);
        GET_REG32_1(TX_CPU_PGMCTR);
        GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
        GET_REG32_LOOP(FTQ_RESET, 0x120);
        GET_REG32_LOOP(MSGINT_MODE, 0x0c);
        GET_REG32_1(DMAC_MODE);
        GET_REG32_LOOP(GRC_MODE, 0x4c);
        /* NVRAM registers only exist on parts with NVRAM attached. */
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

        tg3_full_unlock(tp);
}
8225
/* ethtool get_eeprom_len: NVRAM size as recorded in tp->nvram_size. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
8232
8233 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8234 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8235 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8236
/* ethtool get_eeprom: read an arbitrary byte range from NVRAM.  NVRAM
 * is accessed in 4-byte words, so an unaligned head and tail are
 * handled separately from the word-aligned middle.  eeprom->len
 * tracks how many bytes were successfully copied, even on a partial
 * failure.  Returns 0 or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, b_offset, b_count;
        __le32 val;

        /* NVRAM is unreachable while the device is powered down. */
        if (tp->link_config.phy_is_low_power)
                return -EAGAIN;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                memcpy(data, ((char*)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes upto the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read_le(tp, offset + i, &val);
                if (ret) {
                        /* Report the bytes copied so far. */
                        eeprom->len += i;
                        return ret;
                }
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read_le(tp, b_offset, &val);
                if (ret)
                        return ret;
                memcpy(pd, &val, b_count);
                eeprom->len += b_count;
        }
        return 0;
}
8296
8297 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8298
/* ethtool set_eeprom: write an arbitrary byte range to NVRAM.  Writes
 * are word-based, so an unaligned start or length triggers a
 * read-modify-write: the bordering NVRAM words are read first and
 * merged around the caller's data in a temporary buffer.  Returns 0
 * or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u32 offset, len, b_offset, odd_len;
        u8 *buf;
        __le32 start, end;

        /* NVRAM is unreachable while the device is powered down. */
        if (tp->link_config.phy_is_low_power)
                return -EAGAIN;

        /* Caller must echo the magic back as a safety interlock. */
        if (eeprom->magic != TG3_EEPROM_MAGIC)
                return -EINVAL;

        offset = eeprom->offset;
        len = eeprom->len;

        if ((b_offset = (offset & 3))) {
                /* adjustments to start on required 4 byte boundary */
                ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
                if (ret)
                        return ret;
                len += b_offset;
                offset &= ~3;
                if (len < 4)
                        len = 4;
        }

        odd_len = 0;
        if (len & 3) {
                /* adjustments to end on required 4 byte boundary */
                odd_len = 1;
                len = (len + 3) & ~3;
                ret = tg3_nvram_read_le(tp, offset+len-4, &end);
                if (ret)
                        return ret;
        }

        buf = data;
        if (b_offset || odd_len) {
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                /* Merge the preserved head/tail words around the
                 * caller's payload.
                 */
                if (b_offset)
                        memcpy(buf, &start, 4);
                if (odd_len)
                        memcpy(buf+len-4, &end, 4);
                memcpy(buf + b_offset, data, eeprom->len);
        }

        ret = tg3_nvram_write_block(tp, offset, len, buf);

        if (buf != data)
                kfree(buf);

        return ret;
}
8356
/* ethtool get_settings: report supported modes, the current
 * advertisement mask, and (while the interface is running) the active
 * speed/duplex.  Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);

        cmd->supported = (SUPPORTED_Autoneg);

        /* Gigabit modes, unless this is a 10/100-only part. */
        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
                cmd->supported |= (SUPPORTED_1000baseT_Half |
                                   SUPPORTED_1000baseT_Full);

        /* Copper (TP) vs. SerDes (fibre) port capabilities. */
        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                cmd->supported |= (SUPPORTED_100baseT_Half |
                                  SUPPORTED_100baseT_Full |
                                  SUPPORTED_10baseT_Half |
                                  SUPPORTED_10baseT_Full |
                                  SUPPORTED_TP);
                cmd->port = PORT_TP;
        } else {
                cmd->supported |= SUPPORTED_FIBRE;
                cmd->port = PORT_FIBRE;
        }

        cmd->advertising = tp->link_config.advertising;
        /* Speed/duplex are only meaningful while the link is up. */
        if (netif_running(dev)) {
                cmd->speed = tp->link_config.active_speed;
                cmd->duplex = tp->link_config.active_duplex;
        }
        cmd->phy_address = PHY_ADDR;
        cmd->transceiver = 0;
        cmd->autoneg = tp->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
8391
8392 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8393 {
8394         struct tg3 *tp = netdev_priv(dev);
8395
8396         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8397                 /* These are the only valid advertisement bits allowed.  */
8398                 if (cmd->autoneg == AUTONEG_ENABLE &&
8399                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8400                                           ADVERTISED_1000baseT_Full |
8401                                           ADVERTISED_Autoneg |
8402                                           ADVERTISED_FIBRE)))
8403                         return -EINVAL;
8404                 /* Fiber can only do SPEED_1000.  */
8405                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8406                          (cmd->speed != SPEED_1000))
8407                         return -EINVAL;
8408         /* Copper cannot force SPEED_1000.  */
8409         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8410                    (cmd->speed == SPEED_1000))
8411                 return -EINVAL;
8412         else if ((cmd->speed == SPEED_1000) &&
8413                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8414                 return -EINVAL;
8415
8416         tg3_full_lock(tp, 0);
8417
8418         tp->link_config.autoneg = cmd->autoneg;
8419         if (cmd->autoneg == AUTONEG_ENABLE) {
8420                 tp->link_config.advertising = (cmd->advertising |
8421                                               ADVERTISED_Autoneg);
8422                 tp->link_config.speed = SPEED_INVALID;
8423                 tp->link_config.duplex = DUPLEX_INVALID;
8424         } else {
8425                 tp->link_config.advertising = 0;
8426                 tp->link_config.speed = cmd->speed;
8427                 tp->link_config.duplex = cmd->duplex;
8428         }
8429
8430         tp->link_config.orig_speed = tp->link_config.speed;
8431         tp->link_config.orig_duplex = tp->link_config.duplex;
8432         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8433
8434         if (netif_running(dev))
8435                 tg3_setup_phy(tp, 1);
8436
8437         tg3_full_unlock(tp);
8438
8439         return 0;
8440 }
8441
8442 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8443 {
8444         struct tg3 *tp = netdev_priv(dev);
8445
8446         strcpy(info->driver, DRV_MODULE_NAME);
8447         strcpy(info->version, DRV_MODULE_VERSION);
8448         strcpy(info->fw_version, tp->fw_ver);
8449         strcpy(info->bus_info, pci_name(tp->pdev));
8450 }
8451
8452 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8453 {
8454         struct tg3 *tp = netdev_priv(dev);
8455
8456         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8457                 wol->supported = WAKE_MAGIC;
8458         else
8459                 wol->supported = 0;
8460         wol->wolopts = 0;
8461         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8462                 wol->wolopts = WAKE_MAGIC;
8463         memset(&wol->sopass, 0, sizeof(wol->sopass));
8464 }
8465
8466 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8467 {
8468         struct tg3 *tp = netdev_priv(dev);
8469
8470         if (wol->wolopts & ~WAKE_MAGIC)
8471                 return -EINVAL;
8472         if ((wol->wolopts & WAKE_MAGIC) &&
8473             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8474                 return -EINVAL;
8475
8476         spin_lock_bh(&tp->lock);
8477         if (wol->wolopts & WAKE_MAGIC)
8478                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8479         else
8480                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8481         spin_unlock_bh(&tp->lock);
8482
8483         return 0;
8484 }
8485
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        return tp->msg_enable;
}
8491
/* ethtool set_msglevel: set the driver's message-enable bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);
        tp->msg_enable = value;
}
8497
/* ethtool set_tso: enable/disable TSO, also toggling the TSO6 (and,
 * on 5761, TSO-ECN) feature bits on hardware generations that have
 * them.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Not TSO-capable: only "off" is acceptable. */
        if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
                if (value)
                        return -EINVAL;
                return 0;
        }
        /* HW_TSO_2 parts other than the 5906 also handle IPv6 TSO;
         * the 5761 additionally supports TSO with ECN.
         */
        if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
                if (value) {
                        dev->features |= NETIF_F_TSO6;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                                dev->features |= NETIF_F_TSO_ECN;
                } else
                        dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
        }
        return ethtool_op_set_tso(dev, value);
}
8518
/* ethtool nway_reset: restart autonegotiation on the copper PHY.
 * Returns -EAGAIN if the interface is down, -EINVAL on SerDes parts
 * or when autoneg is not enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 bmcr;
        int r;

        if (!netif_running(dev))
                return -EAGAIN;

        /* SerDes links do not use the MII BMCR autoneg restart. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                return -EINVAL;

        spin_lock_bh(&tp->lock);
        r = -EINVAL;
        /* NOTE(review): BMCR is read twice; the first read looks like
         * a deliberate dummy read (its value is overwritten below) --
         * confirm against PHY errata before removing.
         */
        tg3_readphy(tp, MII_BMCR, &bmcr);
        if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
            ((bmcr & BMCR_ANENABLE) ||
             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
                tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                           BMCR_ANENABLE);
                r = 0;
        }
        spin_unlock_bh(&tp->lock);

        return r;
}
8545
8546 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8547 {
8548         struct tg3 *tp = netdev_priv(dev);
8549
8550         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8551         ering->rx_mini_max_pending = 0;
8552         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8553                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8554         else
8555                 ering->rx_jumbo_max_pending = 0;
8556
8557         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8558
8559         ering->rx_pending = tp->rx_pending;
8560         ering->rx_mini_pending = 0;
8561         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8562                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8563         else
8564                 ering->rx_jumbo_pending = 0;
8565
8566         ering->tx_pending = tp->tx_pending;
8567 }
8568
/* ethtool set_ringparam: resize the RX/TX rings.  A running device is
 * halted and re-initialized so the new sizes take effect.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct tg3 *tp = netdev_priv(dev);
        int irq_sync = 0, err = 0;

        /* Reject sizes beyond the ring capacity, and TX rings too
         * small to hold a maximally-fragmented skb (with a 3x margin
         * on TSO_BUG parts).
         */
        if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
            (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
            (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
            (ering->tx_pending <= MAX_SKB_FRAGS) ||
            ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
             (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
                return -EINVAL;

        if (netif_running(dev)) {
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        tp->rx_pending = ering->rx_pending;

        /* Some parts can only post up to 64 standard RX descriptors. */
        if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;
        tp->tx_pending = ering->tx_pending;

        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        return err;
}
8608
8609 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8610 {
8611         struct tg3 *tp = netdev_priv(dev);
8612
8613         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8614
8615         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8616                 epause->rx_pause = 1;
8617         else
8618                 epause->rx_pause = 0;
8619
8620         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8621                 epause->tx_pause = 1;
8622         else
8623                 epause->tx_pause = 0;
8624 }
8625
/* ethtool set_pauseparam: update flow-control autoneg and the desired
 * RX/TX pause configuration, then halt and restart a running device so
 * the new settings are programmed into the hardware.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
        struct tg3 *tp = netdev_priv(dev);
        int irq_sync = 0, err = 0;

        if (netif_running(dev)) {
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        if (epause->autoneg)
                tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
        else
                tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
        if (epause->rx_pause)
                tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
        else
                tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
        if (epause->tx_pause)
                tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
        else
                tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        return err;
}
8662
/* ethtool get_rx_csum: report whether RX checksum offload is on. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
8668
8669 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8670 {
8671         struct tg3 *tp = netdev_priv(dev);
8672
8673         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8674                 if (data != 0)
8675                         return -EINVAL;
8676                 return 0;
8677         }
8678
8679         spin_lock_bh(&tp->lock);
8680         if (data)
8681                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8682         else
8683                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8684         spin_unlock_bh(&tp->lock);
8685
8686         return 0;
8687 }
8688
8689 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8690 {
8691         struct tg3 *tp = netdev_priv(dev);
8692
8693         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8694                 if (data != 0)
8695                         return -EINVAL;
8696                 return 0;
8697         }
8698
8699         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8700             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8701             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8702             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8703                 ethtool_op_set_tx_ipv6_csum(dev, data);
8704         else
8705                 ethtool_op_set_tx_csum(dev, data);
8706
8707         return 0;
8708 }
8709
8710 static int tg3_get_sset_count (struct net_device *dev, int sset)
8711 {
8712         switch (sset) {
8713         case ETH_SS_TEST:
8714                 return TG3_NUM_TEST;
8715         case ETH_SS_STATS:
8716                 return TG3_NUM_STATS;
8717         default:
8718                 return -EOPNOTSUPP;
8719         }
8720 }
8721
8722 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8723 {
8724         switch (stringset) {
8725         case ETH_SS_STATS:
8726                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8727                 break;
8728         case ETH_SS_TEST:
8729                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8730                 break;
8731         default:
8732                 WARN_ON(1);     /* we need a WARN() */
8733                 break;
8734         }
8735 }
8736
/* ethtool phys_id: blink the port LEDs to identify the adapter.
 * Alternates between "all LEDs forced on" and "LEDs off" every 500ms
 * for 'data' seconds (default 2), then restores the original LED
 * configuration.  An interrupted sleep ends the blinking early.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
        struct tg3 *tp = netdev_priv(dev);
        int i;

        if (!netif_running(tp->dev))
                return -EAGAIN;

        if (data == 0)
                data = 2;

        /* data * 2 half-second phases == data seconds of blinking. */
        for (i = 0; i < (data * 2); i++) {
                if ((i % 2) == 0)
                        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                                           LED_CTRL_1000MBPS_ON |
                                           LED_CTRL_100MBPS_ON |
                                           LED_CTRL_10MBPS_ON |
                                           LED_CTRL_TRAFFIC_OVERRIDE |
                                           LED_CTRL_TRAFFIC_BLINK |
                                           LED_CTRL_TRAFFIC_LED);

                else
                        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                                           LED_CTRL_TRAFFIC_OVERRIDE);

                if (msleep_interruptible(500))
                        break;
        }
        tw32(MAC_LED_CTRL, tp->led_ctrl);
        return 0;
}
8768
8769 static void tg3_get_ethtool_stats (struct net_device *dev,
8770                                    struct ethtool_stats *estats, u64 *tmp_stats)
8771 {
8772         struct tg3 *tp = netdev_priv(dev);
8773         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8774 }
8775
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* Verify the integrity of the NVRAM image.
 *
 * The magic word at offset 0 selects the image format:
 *  - TG3_EEPROM_MAGIC:    legacy image, two CRC blocks checked at the end.
 *  - TG3_EEPROM_MAGIC_FW: selfboot format 1, 8-bit additive checksum.
 *  - TG3_EEPROM_MAGIC_HW: hardware selfboot image with per-byte parity.
 *
 * Returns 0 when the image checks out (or the selfboot revision is
 * unrecognized and therefore not checked), -EIO on read failure or a bad
 * checksum/parity, -ENOMEM if the temporary buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes of NVRAM the check must cover. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			/* Selfboot format 1: size depends on the revision. */
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: don't attempt a check. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Pull the image into memory one 32-bit word at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums to zero (mod 256). */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 parity bits ... */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				/* ... and byte 17 carries 8 more. */
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte together with its stored parity bit must
		 * have odd parity: odd data weight pairs with a clear bit,
		 * even data weight with a set bit.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8916
8917 #define TG3_SERDES_TIMEOUT_SEC  2
8918 #define TG3_COPPER_TIMEOUT_SEC  6
8919
8920 static int tg3_test_link(struct tg3 *tp)
8921 {
8922         int i, max;
8923
8924         if (!netif_running(tp->dev))
8925                 return -ENODEV;
8926
8927         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8928                 max = TG3_SERDES_TIMEOUT_SEC;
8929         else
8930                 max = TG3_COPPER_TIMEOUT_SEC;
8931
8932         for (i = 0; i < max; i++) {
8933                 if (netif_carrier_ok(tp->dev))
8934                         return 0;
8935
8936                 if (msleep_interruptible(1000))
8937                         break;
8938         }
8939
8940         return -EIO;
8941 }
8942
/* Only test the commonly used registers */
/* For each table entry: write 0, then write (read_mask | write_mask), and
 * after each write verify that the read-only bits (read_mask) kept their
 * original value and the read/write bits (write_mask) took the written
 * value.  The original register content is restored afterwards.
 * Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Flag bits select which chip families an entry applies to. */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel terminating the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	/* Classify the chip once so per-entry flag checks are cheap. */
	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that don't apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the original value before moving on. */
		tw32(offset, save_val);
	}

	return 0;

out:
	/* Mismatch: report the failing offset and restore the register. */
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9163
9164 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9165 {
9166         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9167         int i;
9168         u32 j;
9169
9170         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9171                 for (j = 0; j < len; j += 4) {
9172                         u32 val;
9173
9174                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9175                         tg3_read_mem(tp, offset + j, &val);
9176                         if (val != test_pattern[i])
9177                                 return -EIO;
9178                 }
9179         }
9180         return 0;
9181 }
9182
9183 static int tg3_test_memory(struct tg3 *tp)
9184 {
9185         static struct mem_entry {
9186                 u32 offset;
9187                 u32 len;
9188         } mem_tbl_570x[] = {
9189                 { 0x00000000, 0x00b50},
9190                 { 0x00002000, 0x1c000},
9191                 { 0xffffffff, 0x00000}
9192         }, mem_tbl_5705[] = {
9193                 { 0x00000100, 0x0000c},
9194                 { 0x00000200, 0x00008},
9195                 { 0x00004000, 0x00800},
9196                 { 0x00006000, 0x01000},
9197                 { 0x00008000, 0x02000},
9198                 { 0x00010000, 0x0e000},
9199                 { 0xffffffff, 0x00000}
9200         }, mem_tbl_5755[] = {
9201                 { 0x00000200, 0x00008},
9202                 { 0x00004000, 0x00800},
9203                 { 0x00006000, 0x00800},
9204                 { 0x00008000, 0x02000},
9205                 { 0x00010000, 0x0c000},
9206                 { 0xffffffff, 0x00000}
9207         }, mem_tbl_5906[] = {
9208                 { 0x00000200, 0x00008},
9209                 { 0x00004000, 0x00400},
9210                 { 0x00006000, 0x00400},
9211                 { 0x00008000, 0x01000},
9212                 { 0x00010000, 0x01000},
9213                 { 0xffffffff, 0x00000}
9214         };
9215         struct mem_entry *mem_tbl;
9216         int err = 0;
9217         int i;
9218
9219         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9220                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9221                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9222                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9223                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9224                         mem_tbl = mem_tbl_5755;
9225                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9226                         mem_tbl = mem_tbl_5906;
9227                 else
9228                         mem_tbl = mem_tbl_5705;
9229         } else
9230                 mem_tbl = mem_tbl_570x;
9231
9232         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9233                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9234                     mem_tbl[i].len)) != 0)
9235                         break;
9236         }
9237
9238         return err;
9239 }
9240
#define TG3_MAC_LOOPBACK        0
#define TG3_PHY_LOOPBACK        1

/* Send a single test frame through the chip in MAC- or PHY-level
 * loopback and verify that it is received back intact.
 *
 * Returns 0 on success (also on 5780 MAC loopback, which is skipped due
 * to a hardware erratum), -EIO if the frame is not transmitted or does
 * not come back matching, -ENOMEM if the test skb cannot be allocated,
 * -EINVAL for an unknown loopback_mode.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Loop frames back inside the MAC itself. */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: tweak a shadow PHY register before the test. */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		/* Put the PHY into loopback mode. */
		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* 5700 link polarity depends on the attached PHY. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a max-size test frame: our MAC address, zero pad, then an
	 * incrementing byte pattern we can verify on receive.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember where the RX producer index stands before sending. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Queue the frame and ring the TX doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Frame must have been consumed by TX and produced on RX. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the RX return descriptor for the looped-back frame. */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the payload pattern byte for byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9411
#define TG3_MAC_LOOPBACK_FAILED         1
#define TG3_PHY_LOOPBACK_FAILED         2
#define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
                                         TG3_PHY_LOOPBACK_FAILED)

/* Run the MAC loopback test and, for non-SerDes PHYs, the PHY loopback
 * test.  Returns a bitmask of TG3_*_LOOPBACK_FAILED flags; 0 means all
 * attempted tests passed.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	/* Bring the hardware to a known state before the test. */
	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		int i;
		u32 status;

		/* Request the CPMU hardware mutex before touching CPMU_CTRL. */
		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off power management based on link speed. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback is only meaningful on copper PHYs. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9470
/* ethtool self_test handler.  Always runs the NVRAM and link tests; when
 * ETH_TEST_FL_OFFLINE is requested it additionally halts the chip and runs
 * the register, memory, loopback, and interrupt tests, then restarts the
 * hardware.  Per-test results land in data[0..5] (nonzero = failed) and
 * any failure sets ETH_TEST_FL_FAILED in etest->flags.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Power the device up for the duration of the test if needed. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce the interface before taking the chip down. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs for offline testing. */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test must run without tp->lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the hardware back up for normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Return to low-power state if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9543
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 *
 * Returns 0 or a PHY-access error on success paths, -EAGAIN while the
 * PHY is in low-power state, -EPERM for unprivileged writes, and
 * -EOPNOTSUPP for unsupported commands or SERDES devices with no
 * MII-accessible PHY.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = PHY_ADDR;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (tp->link_config.phy_is_low_power)
                        return -EAGAIN;

                /* Serialize MDIO access against the rest of the driver. */
                spin_lock_bh(&tp->lock);
                err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&tp->lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */

                /* Register writes require admin privilege. */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (tp->link_config.phy_is_low_power)
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&tp->lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
9595
9596 #if TG3_VLAN_TAG_USED
/* VLAN acceleration registration callback.
 *
 * Stores the new vlan_group and reprograms the RX mode so the chip
 * keeps or strips VLAN tags accordingly.  The device is quiesced
 * around the update to avoid racing the RX path.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev))
                tg3_netif_stop(tp);

        tg3_full_lock(tp, 0);

        tp->vlgrp = grp;

        /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
        __tg3_set_rx_mode(dev);

        if (netif_running(dev))
                tg3_netif_start(tp);

        tg3_full_unlock(tp);
}
9616 #endif
9617
9618 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9619 {
9620         struct tg3 *tp = netdev_priv(dev);
9621
9622         memcpy(ec, &tp->coal, sizeof(*ec));
9623         return 0;
9624 }
9625
9626 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9627 {
9628         struct tg3 *tp = netdev_priv(dev);
9629         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9630         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9631
9632         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9633                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9634                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9635                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9636                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9637         }
9638
9639         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9640             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9641             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9642             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9643             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9644             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9645             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9646             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9647             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9648             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9649                 return -EINVAL;
9650
9651         /* No rx interrupts will be generated if both are zero */
9652         if ((ec->rx_coalesce_usecs == 0) &&
9653             (ec->rx_max_coalesced_frames == 0))
9654                 return -EINVAL;
9655
9656         /* No tx interrupts will be generated if both are zero */
9657         if ((ec->tx_coalesce_usecs == 0) &&
9658             (ec->tx_max_coalesced_frames == 0))
9659                 return -EINVAL;
9660
9661         /* Only copy relevant parameters, ignore all others. */
9662         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9663         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9664         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9665         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9666         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9667         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9668         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9669         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9670         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9671
9672         if (netif_running(dev)) {
9673                 tg3_full_lock(tp, 0);
9674                 __tg3_set_coalesce(tp, &tp->coal);
9675                 tg3_full_unlock(tp);
9676         }
9677         return 0;
9678 }
9679
/* ethtool operations table wired up in the net_device; each entry maps
 * an ethtool command to its tg3_* handler above.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .get_rx_csum            = tg3_get_rx_csum,
        .set_rx_csum            = tg3_set_rx_csum,
        .set_tx_csum            = tg3_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = tg3_set_tso,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .phys_id                = tg3_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};
9712
9713 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9714 {
9715         u32 cursize, val, magic;
9716
9717         tp->nvram_size = EEPROM_CHIP_SIZE;
9718
9719         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9720                 return;
9721
9722         if ((magic != TG3_EEPROM_MAGIC) &&
9723             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9724             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9725                 return;
9726
9727         /*
9728          * Size the chip by reading offsets at increasing powers of two.
9729          * When we encounter our validation signature, we know the addressing
9730          * has wrapped around, and thus have our chip size.
9731          */
9732         cursize = 0x10;
9733
9734         while (cursize < tp->nvram_size) {
9735                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9736                         return;
9737
9738                 if (val == magic)
9739                         break;
9740
9741                 cursize <<= 1;
9742         }
9743
9744         tp->nvram_size = cursize;
9745 }
9746
9747 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9748 {
9749         u32 val;
9750
9751         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9752                 return;
9753
9754         /* Selfboot format */
9755         if (val != TG3_EEPROM_MAGIC) {
9756                 tg3_get_eeprom_size(tp);
9757                 return;
9758         }
9759
9760         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9761                 if (val != 0) {
9762                         tp->nvram_size = (val >> 16) * 1024;
9763                         return;
9764                 }
9765         }
9766         tp->nvram_size = 0x80000;
9767 }
9768
/* Probe NVRAM_CFG1 to identify the attached NVRAM part (vendor,
 * page size, buffered-ness) on 5750/5780-class chips; other chips get
 * the buffered Atmel AT45DB0X1B defaults.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tp->tg3_flags2 |= TG3_FLG2_FLASH;
        }
        else {
                /* Non-flash interface: force compat-bypass off. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                /* Map the vendor field to JEDEC id, page size, and
                 * buffered flag for each supported part.
                 */
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                        case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_ATMEL_EEPROM:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ST:
                                tp->nvram_jedecnum = JEDEC_ST;
                                tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_SAIFUN:
                                tp->nvram_jedecnum = JEDEC_SAIFUN;
                                tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_SST_SMALL:
                        case FLASH_VENDOR_SST_LARGE:
                                tp->nvram_jedecnum = JEDEC_SST;
                                tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                                break;
                }
        }
        else {
                /* Default for all other chips. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
        }
}
9821
/* 5752-specific NVRAM probe: decode NVRAM_CFG1 into JEDEC vendor,
 * flash/EEPROM mode, TPM protection flag, and page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
        }

        /* Flash parts advertise their page size in CFG1; EEPROMs use a
         * single maximum-size "page".
         */
        if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
                switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
                        case FLASH_5752PAGE_SIZE_256:
                                tp->nvram_pagesize = 256;
                                break;
                        case FLASH_5752PAGE_SIZE_512:
                                tp->nvram_pagesize = 512;
                                break;
                        case FLASH_5752PAGE_SIZE_1K:
                                tp->nvram_pagesize = 1024;
                                break;
                        case FLASH_5752PAGE_SIZE_2K:
                                tp->nvram_pagesize = 2048;
                                break;
                        case FLASH_5752PAGE_SIZE_4K:
                                tp->nvram_pagesize = 4096;
                                break;
                        case FLASH_5752PAGE_SIZE_264:
                                tp->nvram_pagesize = 264;
                                break;
                }
        }
        else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
9882
/* 5755-specific NVRAM probe: decode NVRAM_CFG1 into vendor, page size,
 * and total size.  TPM-protected parts report smaller usable sizes.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                case FLASH_5755VENDOR_ATMEL_FLASH_5:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        /* Size depends on the exact part and whether
                         * the TPM-protected region is reserved.
                         */
                        if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                            nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                                tp->nvram_size = (protect ? 0x3e200 : 0x80000);
                        else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                                tp->nvram_size = (protect ? 0x1f200 : 0x40000);
                        else
                                tp->nvram_size = (protect ? 0x1f200 : 0x20000);
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                                tp->nvram_size = (protect ? 0x10000 : 0x20000);
                        else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                                tp->nvram_size = (protect ? 0x10000 : 0x40000);
                        else
                                tp->nvram_size = (protect ? 0x20000 : 0x80000);
                        break;
        }
}
9929
/* 5787-specific NVRAM probe: decode NVRAM_CFG1 into vendor, buffered
 * flag, flash mode, and page size.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                        /* EEPROM: single max-size page, bypass off. */
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                        tw32(NVRAM_CFG1, nvcfg1);
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }
}
9967
/* 5761-specific NVRAM probe: decode NVRAM_CFG1 into vendor and page
 * size, then derive the total size.  Protected parts report the usable
 * size via NVRAM_ADDR_LOCKOUT; unprotected parts are sized by part id.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        /* These Atmel parts use linear addressing; skip
                         * the AT45DB0X1B page translation.
                         */
                        tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
                        tp->nvram_pagesize = 256;
                        break;
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }

        if (protect) {
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                /* Unprotected: size follows directly from the part. */
                switch (nvcfg1) {
                        case FLASH_5761VENDOR_ATMEL_ADB161D:
                        case FLASH_5761VENDOR_ATMEL_MDB161D:
                        case FLASH_5761VENDOR_ST_A_M45PE16:
                        case FLASH_5761VENDOR_ST_M_M45PE16:
                                tp->nvram_size = 0x100000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB081D:
                        case FLASH_5761VENDOR_ATMEL_MDB081D:
                        case FLASH_5761VENDOR_ST_A_M45PE80:
                        case FLASH_5761VENDOR_ST_M_M45PE80:
                                tp->nvram_size = 0x80000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB041D:
                        case FLASH_5761VENDOR_ATMEL_MDB041D:
                        case FLASH_5761VENDOR_ST_A_M45PE40:
                        case FLASH_5761VENDOR_ST_M_M45PE40:
                                tp->nvram_size = 0x40000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB021D:
                        case FLASH_5761VENDOR_ATMEL_MDB021D:
                        case FLASH_5761VENDOR_ST_A_M45PE20:
                        case FLASH_5761VENDOR_ST_M_M45PE20:
                                tp->nvram_size = 0x20000;
                                break;
                }
        }
}
10042
10043 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10044 {
10045         tp->nvram_jedecnum = JEDEC_ATMEL;
10046         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10047         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10048 }
10049
10050 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10051 static void __devinit tg3_nvram_init(struct tg3 *tp)
10052 {
10053         tw32_f(GRC_EEPROM_ADDR,
10054              (EEPROM_ADDR_FSM_RESET |
10055               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10056                EEPROM_ADDR_CLKPERD_SHIFT)));
10057
10058         msleep(1);
10059
10060         /* Enable seeprom accesses. */
10061         tw32_f(GRC_LOCAL_CTRL,
10062              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10063         udelay(100);
10064
10065         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10066             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10067                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10068
10069                 if (tg3_nvram_lock(tp)) {
10070                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10071                                "tg3_nvram_init failed.\n", tp->dev->name);
10072                         return;
10073                 }
10074                 tg3_enable_nvram_access(tp);
10075
10076                 tp->nvram_size = 0;
10077
10078                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10079                         tg3_get_5752_nvram_info(tp);
10080                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10081                         tg3_get_5755_nvram_info(tp);
10082                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10083                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10084                         tg3_get_5787_nvram_info(tp);
10085                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10086                         tg3_get_5761_nvram_info(tp);
10087                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10088                         tg3_get_5906_nvram_info(tp);
10089                 else
10090                         tg3_get_nvram_info(tp);
10091
10092                 if (tp->nvram_size == 0)
10093                         tg3_get_nvram_size(tp);
10094
10095                 tg3_disable_nvram_access(tp);
10096                 tg3_nvram_unlock(tp);
10097
10098         } else {
10099                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10100
10101                 tg3_get_eeprom_size(tp);
10102         }
10103 }
10104
/* Read one 32-bit word from the raw EEPROM via the GRC EEPROM state
 * machine.  @offset must be word-aligned and within the address mask.
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK ||
            (offset % 4) != 0)
                return -EINVAL;

        /* Clear the address/devid/read fields, then kick off a read at
         * device 0, the requested offset.
         */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        /* Poll for completion, up to ~1 second. */
        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        *val = tr32(GRC_EEPROM_DATA);
        return 0;
}
10138
10139 #define NVRAM_CMD_TIMEOUT 10000
10140
10141 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10142 {
10143         int i;
10144
10145         tw32(NVRAM_CMD, nvram_cmd);
10146         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10147                 udelay(10);
10148                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10149                         udelay(10);
10150                         break;
10151                 }
10152         }
10153         if (i == NVRAM_CMD_TIMEOUT) {
10154                 return -EBUSY;
10155         }
10156         return 0;
10157 }
10158
10159 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10160 {
10161         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10162             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10163             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10164            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10165             (tp->nvram_jedecnum == JEDEC_ATMEL))
10166
10167                 addr = ((addr / tp->nvram_pagesize) <<
10168                         ATMEL_AT45DB0X1B_PAGE_POS) +
10169                        (addr % tp->nvram_pagesize);
10170
10171         return addr;
10172 }
10173
10174 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10175 {
10176         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10177             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10178             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10179            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10180             (tp->nvram_jedecnum == JEDEC_ATMEL))
10181
10182                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10183                         tp->nvram_pagesize) +
10184                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10185
10186         return addr;
10187 }
10188
/* Read one 32-bit word from NVRAM at @offset into *@val.
 *
 * Falls back to the raw EEPROM path when the chip has no NVRAM
 * interface.  Otherwise translates the offset for paged parts, takes
 * the NVRAM hardware lock, enables access, issues the read command,
 * and byte-swaps the result.  Returns 0 or a negative errno; on
 * failure *val is not written.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = swab32(tr32(NVRAM_RDDATA));

        /* Unwind in reverse order of acquisition. */
        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
10220
10221 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10222 {
10223         u32 v;
10224         int res = tg3_nvram_read(tp, offset, &v);
10225         if (!res)
10226                 *val = cpu_to_le32(v);
10227         return res;
10228 }
10229
10230 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10231 {
10232         int err;
10233         u32 tmp;
10234
10235         err = tg3_nvram_read(tp, offset, &tmp);
10236         *val = swab32(tmp);
10237         return err;
10238 }
10239
/* Write @len bytes from @buf to the raw EEPROM starting at @offset,
 * one 32-bit word at a time via the GRC EEPROM state machine.
 * @offset/@len are expected to be word-aligned (data is copied in
 * 4-byte chunks).  Returns 0 on success or -EBUSY if a word write
 * fails to complete within ~1 second.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __le32 data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                /* Load the data word, acknowledge any stale COMPLETE
                 * bit, then kick off the write at device 0.
                 */
                tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

                val = tr32(GRC_EEPROM_ADDR);
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll for completion, up to ~1 second per word. */
                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
10282
10283 /* offset and length are dword aligned */
10284 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10285                 u8 *buf)
10286 {
10287         int ret = 0;
10288         u32 pagesize = tp->nvram_pagesize;
10289         u32 pagemask = pagesize - 1;
10290         u32 nvram_cmd;
10291         u8 *tmp;
10292
10293         tmp = kmalloc(pagesize, GFP_KERNEL);
10294         if (tmp == NULL)
10295                 return -ENOMEM;
10296
10297         while (len) {
10298                 int j;
10299                 u32 phy_addr, page_off, size;
10300
10301                 phy_addr = offset & ~pagemask;
10302
10303                 for (j = 0; j < pagesize; j += 4) {
10304                         if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
10305                                                 (__le32 *) (tmp + j))))
10306                                 break;
10307                 }
10308                 if (ret)
10309                         break;
10310
10311                 page_off = offset & pagemask;
10312                 size = pagesize;
10313                 if (len < size)
10314                         size = len;
10315
10316                 len -= size;
10317
10318                 memcpy(tmp + page_off, buf, size);
10319
10320                 offset = offset + (pagesize - page_off);
10321
10322                 tg3_enable_nvram_access(tp);
10323
10324                 /*
10325                  * Before we can erase the flash page, we need
10326                  * to issue a special "write enable" command.
10327                  */
10328                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10329
10330                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10331                         break;
10332
10333                 /* Erase the target page */
10334                 tw32(NVRAM_ADDR, phy_addr);
10335
10336                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10337                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10338
10339                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10340                         break;
10341
10342                 /* Issue another write enable to start the write. */
10343                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10344
10345                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10346                         break;
10347
10348                 for (j = 0; j < pagesize; j += 4) {
10349                         __be32 data;
10350
10351                         data = *((__be32 *) (tmp + j));
10352                         /* swab32(le32_to_cpu(data)), actually */
10353                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
10354
10355                         tw32(NVRAM_ADDR, phy_addr + j);
10356
10357                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10358                                 NVRAM_CMD_WR;
10359
10360                         if (j == 0)
10361                                 nvram_cmd |= NVRAM_CMD_FIRST;
10362                         else if (j == (pagesize - 4))
10363                                 nvram_cmd |= NVRAM_CMD_LAST;
10364
10365                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10366                                 break;
10367                 }
10368                 if (ret)
10369                         break;
10370         }
10371
10372         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10373         tg3_nvram_exec_cmd(tp, nvram_cmd);
10374
10375         kfree(tmp);
10376
10377         return ret;
10378 }
10379
10380 /* offset and length are dword aligned */
10381 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10382                 u8 *buf)
10383 {
10384         int i, ret = 0;
10385
10386         for (i = 0; i < len; i += 4, offset += 4) {
10387                 u32 page_off, phy_addr, nvram_cmd;
10388                 __be32 data;
10389
10390                 memcpy(&data, buf + i, 4);
10391                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10392
10393                 page_off = offset % tp->nvram_pagesize;
10394
10395                 phy_addr = tg3_nvram_phys_addr(tp, offset);
10396
10397                 tw32(NVRAM_ADDR, phy_addr);
10398
10399                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10400
10401                 if ((page_off == 0) || (i == 0))
10402                         nvram_cmd |= NVRAM_CMD_FIRST;
10403                 if (page_off == (tp->nvram_pagesize - 4))
10404                         nvram_cmd |= NVRAM_CMD_LAST;
10405
10406                 if (i == (len - 4))
10407                         nvram_cmd |= NVRAM_CMD_LAST;
10408
10409                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
10410                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10411                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10412                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10413                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
10414                     (tp->nvram_jedecnum == JEDEC_ST) &&
10415                     (nvram_cmd & NVRAM_CMD_FIRST)) {
10416
10417                         if ((ret = tg3_nvram_exec_cmd(tp,
10418                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10419                                 NVRAM_CMD_DONE)))
10420
10421                                 break;
10422                 }
10423                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10424                         /* We always do complete word writes to eeprom. */
10425                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10426                 }
10427
10428                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10429                         break;
10430         }
10431         return ret;
10432 }
10433
10434 /* offset and length are dword aligned */
10435 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10436 {
10437         int ret;
10438
10439         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10440                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10441                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10442                 udelay(40);
10443         }
10444
10445         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10446                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10447         }
10448         else {
10449                 u32 grc_mode;
10450
10451                 ret = tg3_nvram_lock(tp);
10452                 if (ret)
10453                         return ret;
10454
10455                 tg3_enable_nvram_access(tp);
10456                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10457                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10458                         tw32(NVRAM_WRITE1, 0x406);
10459
10460                 grc_mode = tr32(GRC_MODE);
10461                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10462
10463                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10464                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10465
10466                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10467                                 buf);
10468                 }
10469                 else {
10470                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10471                                 buf);
10472                 }
10473
10474                 grc_mode = tr32(GRC_MODE);
10475                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10476
10477                 tg3_disable_nvram_access(tp);
10478                 tg3_nvram_unlock(tp);
10479         }
10480
10481         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10482                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10483                 udelay(40);
10484         }
10485
10486         return ret;
10487 }
10488
/* Maps a PCI subsystem (vendor, device) pair to the PHY ID used on
 * that board; consulted when no PHY ID can be read from the chip or
 * NVRAM.  phy_id of 0 means serdes (see tg3_phy_probe()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
10493
/* Hardcoded board -> PHY ID table, searched by lookup_by_subsys(). */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10531
10532 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10533 {
10534         int i;
10535
10536         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10537                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10538                      tp->pdev->subsystem_vendor) &&
10539                     (subsys_id_to_phy_id[i].subsys_devid ==
10540                      tp->pdev->subsystem_device))
10541                         return &subsys_id_to_phy_id[i];
10542         }
10543         return NULL;
10544 }
10545
10546 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10547 {
10548         u32 val;
10549         u16 pmcsr;
10550
10551         /* On some early chips the SRAM cannot be accessed in D3hot state,
10552          * so need make sure we're in D0.
10553          */
10554         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10555         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10556         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10557         msleep(1);
10558
10559         /* Make sure register accesses (indirect or otherwise)
10560          * will function correctly.
10561          */
10562         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10563                                tp->misc_host_ctrl);
10564
10565         /* The memory arbiter has to be enabled in order for SRAM accesses
10566          * to succeed.  Normally on powerup the tg3 chip firmware will make
10567          * sure it is enabled, but other entities such as system netboot
10568          * code might disable it.
10569          */
10570         val = tr32(MEMARB_MODE);
10571         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10572
10573         tp->phy_id = PHY_ID_INVALID;
10574         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10575
10576         /* Assume an onboard device and WOL capable by default.  */
10577         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10578
10579         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10580                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10581                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10582                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10583                 }
10584                 val = tr32(VCPU_CFGSHDW);
10585                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10586                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10587                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10588                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10589                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10590                 return;
10591         }
10592
10593         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10594         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10595                 u32 nic_cfg, led_cfg;
10596                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10597                 int eeprom_phy_serdes = 0;
10598
10599                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10600                 tp->nic_sram_data_cfg = nic_cfg;
10601
10602                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10603                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10604                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10605                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10606                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10607                     (ver > 0) && (ver < 0x100))
10608                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10609
10610                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10611                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10612                         eeprom_phy_serdes = 1;
10613
10614                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10615                 if (nic_phy_id != 0) {
10616                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10617                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10618
10619                         eeprom_phy_id  = (id1 >> 16) << 10;
10620                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10621                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10622                 } else
10623                         eeprom_phy_id = 0;
10624
10625                 tp->phy_id = eeprom_phy_id;
10626                 if (eeprom_phy_serdes) {
10627                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10628                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10629                         else
10630                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10631                 }
10632
10633                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10634                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10635                                     SHASTA_EXT_LED_MODE_MASK);
10636                 else
10637                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10638
10639                 switch (led_cfg) {
10640                 default:
10641                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10642                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10643                         break;
10644
10645                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10646                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10647                         break;
10648
10649                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10650                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10651
10652                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10653                          * read on some older 5700/5701 bootcode.
10654                          */
10655                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10656                             ASIC_REV_5700 ||
10657                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10658                             ASIC_REV_5701)
10659                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10660
10661                         break;
10662
10663                 case SHASTA_EXT_LED_SHARED:
10664                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10665                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10666                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10667                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10668                                                  LED_CTRL_MODE_PHY_2);
10669                         break;
10670
10671                 case SHASTA_EXT_LED_MAC:
10672                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10673                         break;
10674
10675                 case SHASTA_EXT_LED_COMBO:
10676                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10677                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10678                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10679                                                  LED_CTRL_MODE_PHY_2);
10680                         break;
10681
10682                 };
10683
10684                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10685                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10686                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10687                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10688
10689                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
10690                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1)
10691                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10692
10693                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10694                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10695                         if ((tp->pdev->subsystem_vendor ==
10696                              PCI_VENDOR_ID_ARIMA) &&
10697                             (tp->pdev->subsystem_device == 0x205a ||
10698                              tp->pdev->subsystem_device == 0x2063))
10699                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10700                 } else {
10701                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10702                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10703                 }
10704
10705                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10706                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10707                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10708                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10709                 }
10710                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10711                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10712                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10713                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10714                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10715
10716                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10717                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10718                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10719
10720                 if (cfg2 & (1 << 17))
10721                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10722
10723                 /* serdes signal pre-emphasis in register 0x590 set by */
10724                 /* bootcode if bit 18 is set */
10725                 if (cfg2 & (1 << 18))
10726                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10727
10728                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10729                         u32 cfg3;
10730
10731                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10732                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10733                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10734                 }
10735         }
10736 }
10737
/* Determine the PHY attached to this device.  Tries, in order: the
 * MII PHY ID registers (skipped when ASF/APE firmware owns the PHY),
 * the ID cached from NVRAM by tg3_get_eeprom_hw_cfg(), and finally
 * the hardcoded subsystem-ID table.  For copper PHYs not managed by
 * firmware, also brings autonegotiation advertisement into a sane
 * state.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID words into the driver's internal
		 * PHY_ID format (same packing as in
		 * tg3_get_eeprom_hw_cfg()).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice — presumably to refresh latched
		 * link-status bits before testing BMSR_LSTATUS; if link
		 * is already up, skip the PHY reset entirely.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		/* Advertise all 10/100 modes plus pause. */
		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revs must advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* If not already advertising everything, reprogram the
		 * advertisement registers and restart autoneg.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* Advertisement registers written again unconditionally
		 * (after the wirespeed setup above).
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): when the DSP init above succeeded (err == 0),
	 * this runs it a second time; looks deliberate — confirm against
	 * BCM5401 errata before simplifying.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10865
/* Extract the board part number from the VPD area — read either from
 * NVRAM at offset 0x100 (when a valid EEPROM image is present) or via
 * the PCI VPD capability — into tp->board_part_number.  Falls back to
 * a fixed string when no part number can be found.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Valid NVRAM image: VPD lives at NVRAM offset 0x100. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			/* Unpack the word into bytes, LSB first. */
			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* No NVRAM image: read VPD through the PCI capability. */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Poll the completion flag (bit 15 of VPD_ADDR)
			 * for up to ~100ms.
			 */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip identifier-string (0x82) and read-write (0x91)
		 * VPD resources: 3-byte header plus 16-bit payload length.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Only the read-only resource (0x90) holds "PN". */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk keyword/length/value triples looking for "PN". */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10966
10967 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10968 {
10969         u32 val;
10970
10971         if (tg3_nvram_read_swab(tp, offset, &val) ||
10972             (val & 0xfc000000) != 0x0c000000 ||
10973             tg3_nvram_read_swab(tp, offset + 4, &val) ||
10974             val != 0)
10975                 return 0;
10976
10977         return 1;
10978 }
10979
/* Read the bootcode firmware version string from NVRAM into tp->fw_ver,
 * and, when ASF is enabled (and the APE is not), append the ASF firmware
 * version after it.  Returns silently on any NVRAM read failure or
 * unrecognized layout, possibly leaving tp->fw_ver partially filled.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Word 0 must hold the EEPROM magic, otherwise we do not know
	 * the NVRAM layout and cannot locate a version string.
	 */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc: location of the bootcode image.  Word 0x4: a base
	 * address subtracted below to turn the image-relative
	 * ver_offset into an NVRAM offset.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	/* Validate the image header, then fetch the version-string
	 * pointer stored in the image's third word (offset + 8).
	 */
	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* Copy up to 16 bytes of the bootcode version string. */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* Only append the ASF version when ASF is active and the APE
	 * is not managing the device.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVRAM directory for the ASF init-code entry; its
	 * directory type lives in the top bits of the entry's first
	 * word.
	 */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	/* Loop ran off the end: no ASF entry present. */
	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed base of 0x08000000; later chips
	 * store the base in the word preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	/* Entry word 1 points at the ASF image; validate it and read
	 * its version-string pointer (image word 2).
	 */
	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	/* Append ", " then up to 16 bytes of the ASF version after the
	 * bootcode version already in tp->fw_ver.
	 */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Last word that fits: copy only the remaining space. */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination of the combined string. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11063
11064 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11065
11066 static int __devinit tg3_get_invariants(struct tg3 *tp)
11067 {
11068         static struct pci_device_id write_reorder_chipsets[] = {
11069                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11070                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11071                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11072                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11073                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11074                              PCI_DEVICE_ID_VIA_8385_0) },
11075                 { },
11076         };
11077         u32 misc_ctrl_reg;
11078         u32 cacheline_sz_reg;
11079         u32 pci_state_reg, grc_misc_cfg;
11080         u32 val;
11081         u16 pci_cmd;
11082         int err, pcie_cap;
11083
11084         /* Force memory write invalidate off.  If we leave it on,
11085          * then on 5700_BX chips we have to enable a workaround.
11086          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11087          * to match the cacheline size.  The Broadcom driver have this
11088          * workaround but turns MWI off all the times so never uses
11089          * it.  This seems to suggest that the workaround is insufficient.
11090          */
11091         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11092         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11093         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11094
11095         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11096          * has the register indirect write enable bit set before
11097          * we try to access any of the MMIO registers.  It is also
11098          * critical that the PCI-X hw workaround situation is decided
11099          * before that as well.
11100          */
11101         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11102                               &misc_ctrl_reg);
11103
11104         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11105                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11106         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11107                 u32 prod_id_asic_rev;
11108
11109                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11110                                       &prod_id_asic_rev);
11111                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11112         }
11113
11114         /* Wrong chip ID in 5752 A0. This code can be removed later
11115          * as A0 is not in production.
11116          */
11117         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11118                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11119
11120         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11121          * we need to disable memory and use config. cycles
11122          * only to access all registers. The 5702/03 chips
11123          * can mistakenly decode the special cycles from the
11124          * ICH chipsets as memory write cycles, causing corruption
11125          * of register and memory space. Only certain ICH bridges
11126          * will drive special cycles with non-zero data during the
11127          * address phase which can fall within the 5703's address
11128          * range. This is not an ICH bug as the PCI spec allows
11129          * non-zero address during special cycles. However, only
11130          * these ICH bridges are known to drive non-zero addresses
11131          * during special cycles.
11132          *
11133          * Since special cycles do not cross PCI bridges, we only
11134          * enable this workaround if the 5703 is on the secondary
11135          * bus of these ICH bridges.
11136          */
11137         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11138             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11139                 static struct tg3_dev_id {
11140                         u32     vendor;
11141                         u32     device;
11142                         u32     rev;
11143                 } ich_chipsets[] = {
11144                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11145                           PCI_ANY_ID },
11146                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11147                           PCI_ANY_ID },
11148                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11149                           0xa },
11150                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11151                           PCI_ANY_ID },
11152                         { },
11153                 };
11154                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11155                 struct pci_dev *bridge = NULL;
11156
11157                 while (pci_id->vendor != 0) {
11158                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11159                                                 bridge);
11160                         if (!bridge) {
11161                                 pci_id++;
11162                                 continue;
11163                         }
11164                         if (pci_id->rev != PCI_ANY_ID) {
11165                                 if (bridge->revision > pci_id->rev)
11166                                         continue;
11167                         }
11168                         if (bridge->subordinate &&
11169                             (bridge->subordinate->number ==
11170                              tp->pdev->bus->number)) {
11171
11172                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11173                                 pci_dev_put(bridge);
11174                                 break;
11175                         }
11176                 }
11177         }
11178
11179         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11180          * DMA addresses > 40-bit. This bridge may have other additional
11181          * 57xx devices behind it in some 4-port NIC designs for example.
11182          * Any tg3 device found behind the bridge will also need the 40-bit
11183          * DMA workaround.
11184          */
11185         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11186             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11187                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11188                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11189                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11190         }
11191         else {
11192                 struct pci_dev *bridge = NULL;
11193
11194                 do {
11195                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11196                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11197                                                 bridge);
11198                         if (bridge && bridge->subordinate &&
11199                             (bridge->subordinate->number <=
11200                              tp->pdev->bus->number) &&
11201                             (bridge->subordinate->subordinate >=
11202                              tp->pdev->bus->number)) {
11203                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11204                                 pci_dev_put(bridge);
11205                                 break;
11206                         }
11207                 } while (bridge);
11208         }
11209
11210         /* Initialize misc host control in PCI block. */
11211         tp->misc_host_ctrl |= (misc_ctrl_reg &
11212                                MISC_HOST_CTRL_CHIPREV);
11213         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11214                                tp->misc_host_ctrl);
11215
11216         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11217                               &cacheline_sz_reg);
11218
11219         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11220         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11221         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11222         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11223
11224         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11225             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11226                 tp->pdev_peer = tg3_find_peer(tp);
11227
11228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11229             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11230             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11231             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11232             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11233             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11234             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11235             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11236                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11237
11238         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11239             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11240                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11241
11242         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11243                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11244                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11245                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11246                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11247                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11248                      tp->pdev_peer == tp->pdev))
11249                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11250
11251                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11252                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11253                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11254                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11255                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11256                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11257                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11258                 } else {
11259                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11260                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11261                                 ASIC_REV_5750 &&
11262                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11263                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11264                 }
11265         }
11266
11267         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11268             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11269             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11270             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11271             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11272             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11273             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11274             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11275                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11276
11277         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11278         if (pcie_cap != 0) {
11279                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11280
11281                 pcie_set_readrq(tp->pdev, 4096);
11282
11283                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11284                         u16 lnkctl;
11285
11286                         pci_read_config_word(tp->pdev,
11287                                              pcie_cap + PCI_EXP_LNKCTL,
11288                                              &lnkctl);
11289                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11290                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11291                 }
11292         }
11293
11294         /* If we have an AMD 762 or VIA K8T800 chipset, write
11295          * reordering to the mailbox registers done by the host
11296          * controller can cause major troubles.  We read back from
11297          * every mailbox register write to force the writes to be
11298          * posted to the chip in order.
11299          */
11300         if (pci_dev_present(write_reorder_chipsets) &&
11301             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11302                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11303
11304         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11305             tp->pci_lat_timer < 64) {
11306                 tp->pci_lat_timer = 64;
11307
11308                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11309                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11310                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11311                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11312
11313                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11314                                        cacheline_sz_reg);
11315         }
11316
11317         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11318             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11319                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11320                 if (!tp->pcix_cap) {
11321                         printk(KERN_ERR PFX "Cannot find PCI-X "
11322                                             "capability, aborting.\n");
11323                         return -EIO;
11324                 }
11325         }
11326
11327         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11328                               &pci_state_reg);
11329
11330         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11331                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11332
11333                 /* If this is a 5700 BX chipset, and we are in PCI-X
11334                  * mode, enable register write workaround.
11335                  *
11336                  * The workaround is to use indirect register accesses
11337                  * for all chip writes not to mailbox registers.
11338                  */
11339                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11340                         u32 pm_reg;
11341
11342                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11343
11344                         /* The chip can have it's power management PCI config
11345                          * space registers clobbered due to this bug.
11346                          * So explicitly force the chip into D0 here.
11347                          */
11348                         pci_read_config_dword(tp->pdev,
11349                                               tp->pm_cap + PCI_PM_CTRL,
11350                                               &pm_reg);
11351                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11352                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11353                         pci_write_config_dword(tp->pdev,
11354                                                tp->pm_cap + PCI_PM_CTRL,
11355                                                pm_reg);
11356
11357                         /* Also, force SERR#/PERR# in PCI command. */
11358                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11359                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11360                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11361                 }
11362         }
11363
11364         /* 5700 BX chips need to have their TX producer index mailboxes
11365          * written twice to workaround a bug.
11366          */
11367         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11368                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11369
11370         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11371                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11372         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11373                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11374
11375         /* Chip-specific fixup from Broadcom driver */
11376         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11377             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11378                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11379                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11380         }
11381
11382         /* Default fast path register access methods */
11383         tp->read32 = tg3_read32;
11384         tp->write32 = tg3_write32;
11385         tp->read32_mbox = tg3_read32;
11386         tp->write32_mbox = tg3_write32;
11387         tp->write32_tx_mbox = tg3_write32;
11388         tp->write32_rx_mbox = tg3_write32;
11389
11390         /* Various workaround register access methods */
11391         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11392                 tp->write32 = tg3_write_indirect_reg32;
11393         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11394                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11395                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11396                 /*
11397                  * Back to back register writes can cause problems on these
11398                  * chips, the workaround is to read back all reg writes
11399                  * except those to mailbox regs.
11400                  *
11401                  * See tg3_write_indirect_reg32().
11402                  */
11403                 tp->write32 = tg3_write_flush_reg32;
11404         }
11405
11406
11407         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11408             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11409                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11410                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11411                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11412         }
11413
11414         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11415                 tp->read32 = tg3_read_indirect_reg32;
11416                 tp->write32 = tg3_write_indirect_reg32;
11417                 tp->read32_mbox = tg3_read_indirect_mbox;
11418                 tp->write32_mbox = tg3_write_indirect_mbox;
11419                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11420                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11421
11422                 iounmap(tp->regs);
11423                 tp->regs = NULL;
11424
11425                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11426                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11427                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11428         }
11429         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11430                 tp->read32_mbox = tg3_read32_mbox_5906;
11431                 tp->write32_mbox = tg3_write32_mbox_5906;
11432                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11433                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11434         }
11435
11436         if (tp->write32 == tg3_write_indirect_reg32 ||
11437             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11438              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11439               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11440                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11441
11442         /* Get eeprom hw config before calling tg3_set_power_state().
11443          * In particular, the TG3_FLG2_IS_NIC flag must be
11444          * determined before calling tg3_set_power_state() so that
11445          * we know whether or not to switch out of Vaux power.
11446          * When the flag is set, it means that GPIO1 is used for eeprom
11447          * write protect and also implies that it is a LOM where GPIOs
11448          * are not used to switch power.
11449          */
11450         tg3_get_eeprom_hw_cfg(tp);
11451
11452         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11453                 /* Allow reads and writes to the
11454                  * APE register and memory space.
11455                  */
11456                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11457                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11458                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11459                                        pci_state_reg);
11460         }
11461
11462         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11463             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11464                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11465
11466                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11467                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11468                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11469                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11470                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11471         }
11472
11473         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11474          * GPIO1 driven high will bring 5700's external PHY out of reset.
11475          * It is also used as eeprom write protect on LOMs.
11476          */
11477         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11478         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11479             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11480                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11481                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11482         /* Unused GPIO3 must be driven as output on 5752 because there
11483          * are no pull-up resistors on unused GPIO pins.
11484          */
11485         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11486                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11487
11488         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11489                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11490
11491         /* Force the chip into D0. */
11492         err = tg3_set_power_state(tp, PCI_D0);
11493         if (err) {
11494                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11495                        pci_name(tp->pdev));
11496                 return err;
11497         }
11498
11499         /* 5700 B0 chips do not support checksumming correctly due
11500          * to hardware bugs.
11501          */
11502         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11503                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11504
11505         /* Derive initial jumbo mode from MTU assigned in
11506          * ether_setup() via the alloc_etherdev() call
11507          */
11508         if (tp->dev->mtu > ETH_DATA_LEN &&
11509             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11510                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11511
11512         /* Determine WakeOnLan speed to use. */
11513         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11514             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11515             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11516             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11517                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11518         } else {
11519                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11520         }
11521
11522         /* A few boards don't want Ethernet@WireSpeed phy feature */
11523         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11524             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11525              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11526              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11527             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11528             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11529                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11530
11531         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11532             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11533                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11534         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11535                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11536
11537         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11538                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11539                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11540                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11541                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11542                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11543                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11544                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11545                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11546                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11547                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11548                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11549         }
11550
11551         tp->coalesce_mode = 0;
11552         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11553             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11554                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11555
11556         /* Initialize MAC MI mode, polling disabled. */
11557         tw32_f(MAC_MI_MODE, tp->mi_mode);
11558         udelay(80);
11559
11560         /* Initialize data/descriptor byte/word swapping. */
11561         val = tr32(GRC_MODE);
11562         val &= GRC_MODE_HOST_STACKUP;
11563         tw32(GRC_MODE, val | tp->grc_mode);
11564
11565         tg3_switch_clocks(tp);
11566
11567         /* Clear this out for sanity. */
11568         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11569
11570         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11571                               &pci_state_reg);
11572         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11573             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11574                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11575
11576                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11577                     chiprevid == CHIPREV_ID_5701_B0 ||
11578                     chiprevid == CHIPREV_ID_5701_B2 ||
11579                     chiprevid == CHIPREV_ID_5701_B5) {
11580                         void __iomem *sram_base;
11581
11582                         /* Write some dummy words into the SRAM status block
11583                          * area, see if it reads back correctly.  If the return
11584                          * value is bad, force enable the PCIX workaround.
11585                          */
11586                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11587
11588                         writel(0x00000000, sram_base);
11589                         writel(0x00000000, sram_base + 4);
11590                         writel(0xffffffff, sram_base + 4);
11591                         if (readl(sram_base) != 0x00000000)
11592                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11593                 }
11594         }
11595
11596         udelay(50);
11597         tg3_nvram_init(tp);
11598
11599         grc_misc_cfg = tr32(GRC_MISC_CFG);
11600         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11601
11602         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11603             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11604              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11605                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11606
11607         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11608             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11609                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11610         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11611                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11612                                       HOSTCC_MODE_CLRTICK_TXBD);
11613
11614                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11615                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11616                                        tp->misc_host_ctrl);
11617         }
11618
11619         /* these are limited to 10/100 only */
11620         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11621              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11622             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11623              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11624              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11625               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11626               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11627             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11628              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11629               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11630               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11631             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11632                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11633
11634         err = tg3_phy_probe(tp);
11635         if (err) {
11636                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11637                        pci_name(tp->pdev), err);
11638                 /* ... but do not return immediately ... */
11639         }
11640
11641         tg3_read_partno(tp);
11642         tg3_read_fw_ver(tp);
11643
11644         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11645                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11646         } else {
11647                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11648                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11649                 else
11650                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11651         }
11652
11653         /* 5700 {AX,BX} chips have a broken status block link
11654          * change bit implementation, so we must use the
11655          * status register in those cases.
11656          */
11657         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11658                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11659         else
11660                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11661
11662         /* The led_ctrl is set during tg3_phy_probe, here we might
11663          * have to force the link status polling mechanism based
11664          * upon subsystem IDs.
11665          */
11666         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11667             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11668             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11669                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11670                                   TG3_FLAG_USE_LINKCHG_REG);
11671         }
11672
11673         /* For all SERDES we poll the MAC status register. */
11674         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11675                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11676         else
11677                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11678
11679         /* All chips before 5787 can get confused if TX buffers
11680          * straddle the 4GB address boundary in some cases.
11681          */
11682         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11683             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11684             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11687                 tp->dev->hard_start_xmit = tg3_start_xmit;
11688         else
11689                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11690
11691         tp->rx_offset = 2;
11692         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11693             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11694                 tp->rx_offset = 0;
11695
11696         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11697
11698         /* Increment the rx prod index on the rx std ring by at most
11699          * 8 for these chips to workaround hw errata.
11700          */
11701         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11702             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11703             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11704                 tp->rx_std_max_post = 8;
11705
11706         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11707                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11708                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11709
11710         return err;
11711 }
11712
11713 #ifdef CONFIG_SPARC
11714 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11715 {
11716         struct net_device *dev = tp->dev;
11717         struct pci_dev *pdev = tp->pdev;
11718         struct device_node *dp = pci_device_to_OF_node(pdev);
11719         const unsigned char *addr;
11720         int len;
11721
11722         addr = of_get_property(dp, "local-mac-address", &len);
11723         if (addr && len == 6) {
11724                 memcpy(dev->dev_addr, addr, 6);
11725                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11726                 return 0;
11727         }
11728         return -ENODEV;
11729 }
11730
11731 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11732 {
11733         struct net_device *dev = tp->dev;
11734
11735         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11736         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11737         return 0;
11738 }
11739 #endif
11740
11741 static int __devinit tg3_get_device_address(struct tg3 *tp)
11742 {
11743         struct net_device *dev = tp->dev;
11744         u32 hi, lo, mac_offset;
11745         int addr_ok = 0;
11746
11747 #ifdef CONFIG_SPARC
11748         if (!tg3_get_macaddr_sparc(tp))
11749                 return 0;
11750 #endif
11751
11752         mac_offset = 0x7c;
11753         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11754             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11755                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11756                         mac_offset = 0xcc;
11757                 if (tg3_nvram_lock(tp))
11758                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11759                 else
11760                         tg3_nvram_unlock(tp);
11761         }
11762         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11763                 mac_offset = 0x10;
11764
11765         /* First try to get it from MAC address mailbox. */
11766         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11767         if ((hi >> 16) == 0x484b) {
11768                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11769                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11770
11771                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11772                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11773                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11774                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11775                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11776
11777                 /* Some old bootcode may report a 0 MAC address in SRAM */
11778                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11779         }
11780         if (!addr_ok) {
11781                 /* Next, try NVRAM. */
11782                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11783                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11784                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11785                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11786                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11787                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11788                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11789                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11790                 }
11791                 /* Finally just fetch it out of the MAC control regs. */
11792                 else {
11793                         hi = tr32(MAC_ADDR_0_HIGH);
11794                         lo = tr32(MAC_ADDR_0_LOW);
11795
11796                         dev->dev_addr[5] = lo & 0xff;
11797                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11798                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11799                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11800                         dev->dev_addr[1] = hi & 0xff;
11801                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11802                 }
11803         }
11804
11805         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11806 #ifdef CONFIG_SPARC64
11807                 if (!tg3_get_default_macaddr_sparc(tp))
11808                         return 0;
11809 #endif
11810                 return -EINVAL;
11811         }
11812         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11813         return 0;
11814 }
11815
11816 #define BOUNDARY_SINGLE_CACHELINE       1
11817 #define BOUNDARY_MULTI_CACHELINE        2
11818
11819 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11820 {
11821         int cacheline_size;
11822         u8 byte;
11823         int goal;
11824
11825         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11826         if (byte == 0)
11827                 cacheline_size = 1024;
11828         else
11829                 cacheline_size = (int) byte * 4;
11830
11831         /* On 5703 and later chips, the boundary bits have no
11832          * effect.
11833          */
11834         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11835             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11836             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11837                 goto out;
11838
11839 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11840         goal = BOUNDARY_MULTI_CACHELINE;
11841 #else
11842 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11843         goal = BOUNDARY_SINGLE_CACHELINE;
11844 #else
11845         goal = 0;
11846 #endif
11847 #endif
11848
11849         if (!goal)
11850                 goto out;
11851
11852         /* PCI controllers on most RISC systems tend to disconnect
11853          * when a device tries to burst across a cache-line boundary.
11854          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11855          *
11856          * Unfortunately, for PCI-E there are only limited
11857          * write-side controls for this, and thus for reads
11858          * we will still get the disconnects.  We'll also waste
11859          * these PCI cycles for both read and write for chips
11860          * other than 5700 and 5701 which do not implement the
11861          * boundary bits.
11862          */
11863         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11864             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11865                 switch (cacheline_size) {
11866                 case 16:
11867                 case 32:
11868                 case 64:
11869                 case 128:
11870                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11871                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11872                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11873                         } else {
11874                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11875                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11876                         }
11877                         break;
11878
11879                 case 256:
11880                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11881                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11882                         break;
11883
11884                 default:
11885                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11886                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11887                         break;
11888                 };
11889         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11890                 switch (cacheline_size) {
11891                 case 16:
11892                 case 32:
11893                 case 64:
11894                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11895                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11896                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11897                                 break;
11898                         }
11899                         /* fallthrough */
11900                 case 128:
11901                 default:
11902                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11903                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11904                         break;
11905                 };
11906         } else {
11907                 switch (cacheline_size) {
11908                 case 16:
11909                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11910                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11911                                         DMA_RWCTRL_WRITE_BNDRY_16);
11912                                 break;
11913                         }
11914                         /* fallthrough */
11915                 case 32:
11916                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11917                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11918                                         DMA_RWCTRL_WRITE_BNDRY_32);
11919                                 break;
11920                         }
11921                         /* fallthrough */
11922                 case 64:
11923                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11924                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11925                                         DMA_RWCTRL_WRITE_BNDRY_64);
11926                                 break;
11927                         }
11928                         /* fallthrough */
11929                 case 128:
11930                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11931                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11932                                         DMA_RWCTRL_WRITE_BNDRY_128);
11933                                 break;
11934                         }
11935                         /* fallthrough */
11936                 case 256:
11937                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11938                                 DMA_RWCTRL_WRITE_BNDRY_256);
11939                         break;
11940                 case 512:
11941                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11942                                 DMA_RWCTRL_WRITE_BNDRY_512);
11943                         break;
11944                 case 1024:
11945                 default:
11946                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11947                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11948                         break;
11949                 };
11950         }
11951
11952 out:
11953         return val;
11954 }
11955
/* Drive one DMA transfer of @size bytes between the host buffer
 * (@buf / @buf_dma) and NIC SRAM at offset 0x2100, using a hand-built
 * internal buffer descriptor enqueued on the flow-through queues.
 *
 * @to_device: nonzero for host->NIC (read DMA engine), zero for
 *             NIC->host (write DMA engine).
 *
 * Returns 0 when the completion FIFO reports our descriptor within
 * the 40 x 100us polling window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Clear completion FIFOs and DMA engine status before starting. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Point the descriptor at the host buffer and the NIC-side
         * mbuf at SRAM offset 0x2100.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor into NIC SRAM one word at a time via
         * the PCI memory window config registers.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick the selected DMA engine by enqueueing the descriptor. */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll the completion FIFO for our descriptor address. */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
12036
12037 #define TEST_BUFFER_SIZE        0x2000
12038
/* Tune DMA_RW_CTRL for this chip and bus, then (on 5700/5701 only)
 * run a live host<->NIC DMA loopback to detect the write DMA bug,
 * tightening the write boundary to 16 bytes when corruption is seen.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if DMA still fails with the workaround.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret;

        buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Base value: PCI write/read command watermarks. */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        /* Bus- and chip-specific watermark adjustments. */
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* Only 5700/5701 exhibit the write DMA bug; everything else
         * skips the loopback test entirely.
         */
        ret = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        /* Loop: write a pattern to the chip, read it back, verify.
         * On corruption, tighten the write boundary to 16 bytes once
         * and retry; a second corruption is a hard failure.
         */
        while (1) {
                u32 *p = buf, i;

                /* Fill the buffer with a known ascending pattern. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                /* First corruption: apply the 16-byte
                                 * boundary workaround and retry.
                                 */
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                static struct pci_device_id dma_wait_state_chipsets[] = {
                        { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
                                     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
                        { },
                };

                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                }
                else
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
12225
12226 static void __devinit tg3_init_link_config(struct tg3 *tp)
12227 {
12228         tp->link_config.advertising =
12229                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12230                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12231                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12232                  ADVERTISED_Autoneg | ADVERTISED_MII);
12233         tp->link_config.speed = SPEED_INVALID;
12234         tp->link_config.duplex = DUPLEX_INVALID;
12235         tp->link_config.autoneg = AUTONEG_ENABLE;
12236         tp->link_config.active_speed = SPEED_INVALID;
12237         tp->link_config.active_duplex = DUPLEX_INVALID;
12238         tp->link_config.phy_is_low_power = 0;
12239         tp->link_config.orig_speed = SPEED_INVALID;
12240         tp->link_config.orig_duplex = DUPLEX_INVALID;
12241         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12242 }
12243
12244 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12245 {
12246         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12247                 tp->bufmgr_config.mbuf_read_dma_low_water =
12248                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12249                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12250                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12251                 tp->bufmgr_config.mbuf_high_water =
12252                         DEFAULT_MB_HIGH_WATER_5705;
12253                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12254                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12255                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12256                         tp->bufmgr_config.mbuf_high_water =
12257                                 DEFAULT_MB_HIGH_WATER_5906;
12258                 }
12259
12260                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12261                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12262                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12263                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12264                 tp->bufmgr_config.mbuf_high_water_jumbo =
12265                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12266         } else {
12267                 tp->bufmgr_config.mbuf_read_dma_low_water =
12268                         DEFAULT_MB_RDMA_LOW_WATER;
12269                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12270                         DEFAULT_MB_MACRX_LOW_WATER;
12271                 tp->bufmgr_config.mbuf_high_water =
12272                         DEFAULT_MB_HIGH_WATER;
12273
12274                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12275                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12276                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12277                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12278                 tp->bufmgr_config.mbuf_high_water_jumbo =
12279                         DEFAULT_MB_HIGH_WATER_JUMBO;
12280         }
12281
12282         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12283         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12284 }
12285
12286 static char * __devinit tg3_phy_string(struct tg3 *tp)
12287 {
12288         switch (tp->phy_id & PHY_ID_MASK) {
12289         case PHY_ID_BCM5400:    return "5400";
12290         case PHY_ID_BCM5401:    return "5401";
12291         case PHY_ID_BCM5411:    return "5411";
12292         case PHY_ID_BCM5701:    return "5701";
12293         case PHY_ID_BCM5703:    return "5703";
12294         case PHY_ID_BCM5704:    return "5704";
12295         case PHY_ID_BCM5705:    return "5705";
12296         case PHY_ID_BCM5750:    return "5750";
12297         case PHY_ID_BCM5752:    return "5752";
12298         case PHY_ID_BCM5714:    return "5714";
12299         case PHY_ID_BCM5780:    return "5780";
12300         case PHY_ID_BCM5755:    return "5755";
12301         case PHY_ID_BCM5787:    return "5787";
12302         case PHY_ID_BCM5784:    return "5784";
12303         case PHY_ID_BCM5756:    return "5722/5756";
12304         case PHY_ID_BCM5906:    return "5906";
12305         case PHY_ID_BCM5761:    return "5761";
12306         case PHY_ID_BCM8002:    return "8002/serdes";
12307         case 0:                 return "serdes";
12308         default:                return "unknown";
12309         };
12310 }
12311
12312 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12313 {
12314         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12315                 strcpy(str, "PCI Express");
12316                 return str;
12317         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12318                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12319
12320                 strcpy(str, "PCIX:");
12321
12322                 if ((clock_ctrl == 7) ||
12323                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12324                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12325                         strcat(str, "133MHz");
12326                 else if (clock_ctrl == 0)
12327                         strcat(str, "33MHz");
12328                 else if (clock_ctrl == 2)
12329                         strcat(str, "50MHz");
12330                 else if (clock_ctrl == 4)
12331                         strcat(str, "66MHz");
12332                 else if (clock_ctrl == 6)
12333                         strcat(str, "100MHz");
12334         } else {
12335                 strcpy(str, "PCI:");
12336                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12337                         strcat(str, "66MHz");
12338                 else
12339                         strcat(str, "33MHz");
12340         }
12341         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12342                 strcat(str, ":32-bit");
12343         else
12344                 strcat(str, ":64-bit");
12345         return str;
12346 }
12347
12348 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12349 {
12350         struct pci_dev *peer;
12351         unsigned int func, devnr = tp->pdev->devfn & ~7;
12352
12353         for (func = 0; func < 8; func++) {
12354                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12355                 if (peer && peer != tp->pdev)
12356                         break;
12357                 pci_dev_put(peer);
12358         }
12359         /* 5704 can be configured in single-port mode, set peer to
12360          * tp->pdev in that case.
12361          */
12362         if (!peer) {
12363                 peer = tp->pdev;
12364                 return peer;
12365         }
12366
12367         /*
12368          * We don't need to keep the refcount elevated; there's no way
12369          * to remove one half of this device without removing the other
12370          */
12371         pci_dev_put(peer);
12372
12373         return peer;
12374 }
12375
12376 static void __devinit tg3_init_coal(struct tg3 *tp)
12377 {
12378         struct ethtool_coalesce *ec = &tp->coal;
12379
12380         memset(ec, 0, sizeof(*ec));
12381         ec->cmd = ETHTOOL_GCOALESCE;
12382         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12383         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12384         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12385         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12386         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12387         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12388         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12389         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12390         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12391
12392         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12393                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12394                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12395                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12396                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12397                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12398         }
12399
12400         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12401                 ec->rx_coalesce_usecs_irq = 0;
12402                 ec->tx_coalesce_usecs_irq = 0;
12403                 ec->stats_block_coalesce_usecs = 0;
12404         }
12405 }
12406
/*
 * tg3_init_one - PCI probe entry point.
 *
 * Brings up one Tigon3 device: enables and maps the PCI resources,
 * allocates and initializes the net_device/tg3 private state, negotiates
 * DMA masks, detects chip capabilities (TSO, checksumming, APE), resets
 * any firmware-left DMA state, runs the DMA engine self-test and finally
 * registers the netdev.  Every failure unwinds through the goto ladder at
 * the bottom in strict reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only for the first device probed. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* The register block lives in BAR 0 and must be memory-mapped. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Initialize the private state with driver-wide defaults. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device operations (pre-net_device_ops era). */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Identify the chip and populate the tg3_flags* capability bits. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO always qualifies; several
	 * older/quirky chips (and ASF-enabled boards) must not use
	 * firmware TSO at all; the rest get firmware TSO with the known
	 * bug workaround flagged.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705_A1 without TSO on a slow bus is limited to 64 rx pending. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Map the APE (management processor) register window from BAR 2
	 * on boards that have one.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		if (tp->aperegs == 0UL) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	/* Probe succeeded: log the hardware summary. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %s\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       print_mac(mac, dev->dev_addr));

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwinding: each label releases exactly what was acquired
	 * after the previous label's resources, in reverse order.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
12748
12749 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12750 {
12751         struct net_device *dev = pci_get_drvdata(pdev);
12752
12753         if (dev) {
12754                 struct tg3 *tp = netdev_priv(dev);
12755
12756                 flush_scheduled_work();
12757                 unregister_netdev(dev);
12758                 if (tp->aperegs) {
12759                         iounmap(tp->aperegs);
12760                         tp->aperegs = NULL;
12761                 }
12762                 if (tp->regs) {
12763                         iounmap(tp->regs);
12764                         tp->regs = NULL;
12765                 }
12766                 free_netdev(dev);
12767                 pci_release_regions(pdev);
12768                 pci_disable_device(pdev);
12769                 pci_set_drvdata(pdev, NULL);
12770         }
12771 }
12772
/*
 * tg3_suspend - legacy PCI suspend hook.
 *
 * Always saves PCI config space, then, if the interface is running,
 * quiesces it (stop NAPI/queues, kill the timer, mask interrupts,
 * detach, halt the chip) and enters the requested low-power state.
 * If the power transition fails, the device is restarted and
 * reattached so it remains usable, and the error is still returned.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Ensure no reset_task is in flight before stopping the NIC. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	/* Mark the device as not initialized across the power transition. */
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power-state change failed: restore the device to a
		 * working state, but still propagate the error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12824
/*
 * tg3_resume - legacy PCI resume hook.
 *
 * Restores PCI config space and, if the interface was running at
 * suspend time, returns the chip to D0, reattaches the netdev,
 * restarts the hardware under the full lock and re-arms the driver
 * timer.  Returns 0 on success or the first error encountered.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12859
/* PCI driver glue: probe/remove plus legacy suspend/resume hooks,
 * matched against tg3_pci_tbl device IDs.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12868
/* Module init: register the PCI driver; probing happens per-device. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
12873
/* Module exit: unregister the driver, removing all bound devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
12878
/* Hook the init/exit routines into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);