1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.83"
68 #define DRV_MODULE_RELDATE      "October 10, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself;
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
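/* Editorial note (not part of the original source): NEXT_TX depends on
 * TG3_TX_RING_SIZE being a power of two, so the wrap-around
 *
 *      (N + 1) % TG3_TX_RING_SIZE
 *
 * reduces to the cheaper
 *
 *      (N + 1) & (TG3_TX_RING_SIZE - 1)
 *
 * For example, with 512 entries index 511 advances to 512 & 511 == 0 and
 * wraps back to the start of the ring.  This is the '& (foo - 1)' trick
 * mentioned in the ring-size comment above.
 */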
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
135 static char version[] __devinitdata =
136         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147 static struct pci_device_id tg3_pci_tbl[] = {
148         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
205         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
206         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
207         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
208         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
209         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
210         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
211         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
212         {}
213 };
214
215 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
216
217 static const struct {
218         const char string[ETH_GSTRING_LEN];
219 } ethtool_stats_keys[TG3_NUM_STATS] = {
220         { "rx_octets" },
221         { "rx_fragments" },
222         { "rx_ucast_packets" },
223         { "rx_mcast_packets" },
224         { "rx_bcast_packets" },
225         { "rx_fcs_errors" },
226         { "rx_align_errors" },
227         { "rx_xon_pause_rcvd" },
228         { "rx_xoff_pause_rcvd" },
229         { "rx_mac_ctrl_rcvd" },
230         { "rx_xoff_entered" },
231         { "rx_frame_too_long_errors" },
232         { "rx_jabbers" },
233         { "rx_undersize_packets" },
234         { "rx_in_length_errors" },
235         { "rx_out_length_errors" },
236         { "rx_64_or_less_octet_packets" },
237         { "rx_65_to_127_octet_packets" },
238         { "rx_128_to_255_octet_packets" },
239         { "rx_256_to_511_octet_packets" },
240         { "rx_512_to_1023_octet_packets" },
241         { "rx_1024_to_1522_octet_packets" },
242         { "rx_1523_to_2047_octet_packets" },
243         { "rx_2048_to_4095_octet_packets" },
244         { "rx_4096_to_8191_octet_packets" },
245         { "rx_8192_to_9022_octet_packets" },
246
247         { "tx_octets" },
248         { "tx_collisions" },
249
250         { "tx_xon_sent" },
251         { "tx_xoff_sent" },
252         { "tx_flow_control" },
253         { "tx_mac_errors" },
254         { "tx_single_collisions" },
255         { "tx_mult_collisions" },
256         { "tx_deferred" },
257         { "tx_excessive_collisions" },
258         { "tx_late_collisions" },
259         { "tx_collide_2times" },
260         { "tx_collide_3times" },
261         { "tx_collide_4times" },
262         { "tx_collide_5times" },
263         { "tx_collide_6times" },
264         { "tx_collide_7times" },
265         { "tx_collide_8times" },
266         { "tx_collide_9times" },
267         { "tx_collide_10times" },
268         { "tx_collide_11times" },
269         { "tx_collide_12times" },
270         { "tx_collide_13times" },
271         { "tx_collide_14times" },
272         { "tx_collide_15times" },
273         { "tx_ucast_packets" },
274         { "tx_mcast_packets" },
275         { "tx_bcast_packets" },
276         { "tx_carrier_sense_errors" },
277         { "tx_discards" },
278         { "tx_errors" },
279
280         { "dma_writeq_full" },
281         { "dma_write_prioq_full" },
282         { "rxbds_empty" },
283         { "rx_discards" },
284         { "rx_errors" },
285         { "rx_threshold_hit" },
286
287         { "dma_readq_full" },
288         { "dma_read_prioq_full" },
289         { "tx_comp_queue_full" },
290
291         { "ring_set_send_prod_index" },
292         { "ring_status_update" },
293         { "nic_irqs" },
294         { "nic_avoided_irqs" },
295         { "nic_tx_threshold_hit" }
296 };
297
298 static const struct {
299         const char string[ETH_GSTRING_LEN];
300 } ethtool_test_keys[TG3_NUM_TEST] = {
301         { "nvram test     (online) " },
302         { "link test      (online) " },
303         { "register test  (offline)" },
304         { "memory test    (offline)" },
305         { "loopback test  (offline)" },
306         { "interrupt test (offline)" },
307 };
308
309 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
310 {
311         writel(val, tp->regs + off);
312 }
313
314 static u32 tg3_read32(struct tg3 *tp, u32 off)
315 {
316         return (readl(tp->regs + off));
317 }
318
319 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
320 {
321         writel(val, tp->aperegs + off);
322 }
323
324 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
325 {
326         return (readl(tp->aperegs + off));
327 }
328
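/* Editorial note (not part of the original source): when the driver is set
 * up for indirect register access (used for certain chipset workarounds),
 * register reads and writes go through a window in PCI configuration
 * space: TG3PCI_REG_BASE_ADDR selects the register offset and
 * TG3PCI_REG_DATA carries the value.  indirect_lock keeps the two
 * config-space accesses of each operation from interleaving with another
 * indirect access.
 */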
329 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
330 {
331         unsigned long flags;
332
333         spin_lock_irqsave(&tp->indirect_lock, flags);
334         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
335         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
336         spin_unlock_irqrestore(&tp->indirect_lock, flags);
337 }
338
339 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
340 {
341         writel(val, tp->regs + off);
342         readl(tp->regs + off);
343 }
344
345 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
346 {
347         unsigned long flags;
348         u32 val;
349
350         spin_lock_irqsave(&tp->indirect_lock, flags);
351         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
352         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
353         spin_unlock_irqrestore(&tp->indirect_lock, flags);
354         return val;
355 }
356
357 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         unsigned long flags;
360
361         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
362                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
363                                        TG3_64BIT_REG_LOW, val);
364                 return;
365         }
366         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
367                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
368                                        TG3_64BIT_REG_LOW, val);
369                 return;
370         }
371
372         spin_lock_irqsave(&tp->indirect_lock, flags);
373         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
374         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
375         spin_unlock_irqrestore(&tp->indirect_lock, flags);
376
377         /* In indirect mode when disabling interrupts, we also need
378          * to clear the interrupt bit in the GRC local ctrl register.
379          */
380         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
381             (val == 0x1)) {
382                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
383                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
384         }
385 }
386
387 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
388 {
389         unsigned long flags;
390         u32 val;
391
392         spin_lock_irqsave(&tp->indirect_lock, flags);
393         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
394         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
395         spin_unlock_irqrestore(&tp->indirect_lock, flags);
396         return val;
397 }
398
399 /* usec_wait specifies the wait time in usec when writing to certain registers
400  * where it is unsafe to read back the register without some delay.
401  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
402  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
403  */
404 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
405 {
406         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
407             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
408                 /* Non-posted methods */
409                 tp->write32(tp, off, val);
410         else {
411                 /* Posted method */
412                 tg3_write32(tp, off, val);
413                 if (usec_wait)
414                         udelay(usec_wait);
415                 tp->read32(tp, off);
416         }
417         /* Wait again after the read for the posted method to guarantee that
418          * the wait time is met.
419          */
420         if (usec_wait)
421                 udelay(usec_wait);
422 }
423
424 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
425 {
426         tp->write32_mbox(tp, off, val);
427         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
428             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
429                 tp->read32_mbox(tp, off);
430 }
431
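/* Editorial note (not part of the original source): tg3_write32_tx_mbox()
 * encodes two hardware quirks.  Chips flagged with TG3_FLAG_TXD_MBOX_HWBUG
 * need the TX mailbox written twice, and chips flagged with
 * TG3_FLAG_MBOX_WRITE_REORDER need a read-back so the posted write is
 * flushed before the doorbell is considered delivered.
 */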
432 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
433 {
434         void __iomem *mbox = tp->regs + off;
435         writel(val, mbox);
436         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
437                 writel(val, mbox);
438         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
439                 readl(mbox);
440 }
441
442 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
443 {
444         return (readl(tp->regs + off + GRCMBOX_BASE));
445 }
446
447 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
448 {
449         writel(val, tp->regs + off + GRCMBOX_BASE);
450 }
451
452 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
453 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
454 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
455 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
456 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
457
458 #define tw32(reg,val)           tp->write32(tp, reg, val)
459 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
460 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
461 #define tr32(reg)               tp->read32(tp, reg)
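/* Editorial usage sketch (not part of the original source): broadly, tw32()
 * posts a write and returns, tw32_f() reads the register back so the posted
 * write is flushed before continuing, and tw32_wait_f() additionally delays
 * for registers that are unsafe to read back immediately, e.g.
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * as done in tg3_switch_clocks() below.
 */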
462
463 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
464 {
465         unsigned long flags;
466
467         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
468             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
469                 return;
470
471         spin_lock_irqsave(&tp->indirect_lock, flags);
472         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
473                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
474                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
475
476                 /* Always leave this as zero. */
477                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
478         } else {
479                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
480                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
481
482                 /* Always leave this as zero. */
483                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
484         }
485         spin_unlock_irqrestore(&tp->indirect_lock, flags);
486 }
487
488 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
489 {
490         unsigned long flags;
491
492         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
493             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
494                 *val = 0;
495                 return;
496         }
497
498         spin_lock_irqsave(&tp->indirect_lock, flags);
499         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
500                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
501                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
502
503                 /* Always leave this as zero. */
504                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
505         } else {
506                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
507                 *val = tr32(TG3PCI_MEM_WIN_DATA);
508
509                 /* Always leave this as zero. */
510                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
511         }
512         spin_unlock_irqrestore(&tp->indirect_lock, flags);
513 }
514
515 static void tg3_ape_lock_init(struct tg3 *tp)
516 {
517         int i;
518
519         /* Make sure the driver doesn't hold any stale locks. */
520         for (i = 0; i < 8; i++)
521                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
522                                 APE_LOCK_GRANT_DRIVER);
523 }
524
525 static int tg3_ape_lock(struct tg3 *tp, int locknum)
526 {
527         int i, off;
528         int ret = 0;
529         u32 status;
530
531         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
532                 return 0;
533
534         switch (locknum) {
535                 case TG3_APE_LOCK_MEM:
536                         break;
537                 default:
538                         return -EINVAL;
539         }
540
541         off = 4 * locknum;
542
543         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
544
545         /* Wait for up to 1 millisecond to acquire lock. */
546         for (i = 0; i < 100; i++) {
547                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
548                 if (status == APE_LOCK_GRANT_DRIVER)
549                         break;
550                 udelay(10);
551         }
552
553         if (status != APE_LOCK_GRANT_DRIVER) {
554                 /* Revoke the lock request. */
555                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
556                                 APE_LOCK_GRANT_DRIVER);
557
558                 ret = -EBUSY;
559         }
560
561         return ret;
562 }
563
564 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
565 {
566         int off;
567
568         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
569                 return;
570
571         switch (locknum) {
572                 case TG3_APE_LOCK_MEM:
573                         break;
574                 default:
575                         return;
576         }
577
578         off = 4 * locknum;
579         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
580 }
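/* Editorial usage sketch (not part of the original source): callers bracket
 * accesses to memory shared with the APE firmware with the lock helpers:
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM) == 0) {
 *              ... touch the shared region ...
 *              tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *      }
 *
 * tg3_ape_lock() gives up after roughly 1 ms (100 polls of 10 usec each)
 * and returns -EBUSY, in which case the shared region must be left alone.
 */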
581
582 static void tg3_disable_ints(struct tg3 *tp)
583 {
584         tw32(TG3PCI_MISC_HOST_CTRL,
585              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
586         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
587 }
588
589 static inline void tg3_cond_int(struct tg3 *tp)
590 {
591         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
592             (tp->hw_status->status & SD_STATUS_UPDATED))
593                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
594         else
595                 tw32(HOSTCC_MODE, tp->coalesce_mode |
596                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
597 }
598
599 static void tg3_enable_ints(struct tg3 *tp)
600 {
601         tp->irq_sync = 0;
602         wmb();
603
604         tw32(TG3PCI_MISC_HOST_CTRL,
605              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
606         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
607                        (tp->last_tag << 24));
608         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
609                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
610                                (tp->last_tag << 24));
611         tg3_cond_int(tp);
612 }
613
614 static inline unsigned int tg3_has_work(struct tg3 *tp)
615 {
616         struct tg3_hw_status *sblk = tp->hw_status;
617         unsigned int work_exists = 0;
618
619         /* check for phy events */
620         if (!(tp->tg3_flags &
621               (TG3_FLAG_USE_LINKCHG_REG |
622                TG3_FLAG_POLL_SERDES))) {
623                 if (sblk->status & SD_STATUS_LINK_CHG)
624                         work_exists = 1;
625         }
626         /* check for RX/TX work to do */
627         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
628             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
629                 work_exists = 1;
630
631         return work_exists;
632 }
633
634 /* tg3_restart_ints
635  *  similar to tg3_enable_ints, but it accurately determines whether there
636  *  is new work pending and can return without flushing the PIO write
637  *  which reenables interrupts
638  */
639 static void tg3_restart_ints(struct tg3 *tp)
640 {
641         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
642                      tp->last_tag << 24);
643         mmiowb();
644
645         /* When doing tagged status, this work check is unnecessary.
646          * The last_tag we write above tells the chip which piece of
647          * work we've completed.
648          */
649         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
650             tg3_has_work(tp))
651                 tw32(HOSTCC_MODE, tp->coalesce_mode |
652                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
653 }
654
655 static inline void tg3_netif_stop(struct tg3 *tp)
656 {
657         tp->dev->trans_start = jiffies; /* prevent tx timeout */
658         napi_disable(&tp->napi);
659         netif_tx_disable(tp->dev);
660 }
661
662 static inline void tg3_netif_start(struct tg3 *tp)
663 {
664         netif_wake_queue(tp->dev);
665         /* NOTE: unconditional netif_wake_queue is only appropriate
666          * so long as all callers are assured to have free tx slots
667          * (such as after tg3_init_hw)
668          */
669         napi_enable(&tp->napi);
670         tp->hw_status->status |= SD_STATUS_UPDATED;
671         tg3_enable_ints(tp);
672 }
673
674 static void tg3_switch_clocks(struct tg3 *tp)
675 {
676         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
677         u32 orig_clock_ctrl;
678
679         if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
680             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
681                 return;
682
683         orig_clock_ctrl = clock_ctrl;
684         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
685                        CLOCK_CTRL_CLKRUN_OENABLE |
686                        0x1f);
687         tp->pci_clock_ctrl = clock_ctrl;
688
689         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
690                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
691                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
692                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
693                 }
694         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
695                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
696                             clock_ctrl |
697                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
698                             40);
699                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
700                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
701                             40);
702         }
703         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
704 }
705
706 #define PHY_BUSY_LOOPS  5000
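/* Editorial note (not part of the original source): tg3_readphy() and
 * tg3_writephy() below poll MAC_MI_COM every 10 usec, so PHY_BUSY_LOOPS
 * bounds a single MDIO transaction to roughly 5000 * 10 usec = 50 ms
 * before the helpers give up and return -EBUSY.
 */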
707
708 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
709 {
710         u32 frame_val;
711         unsigned int loops;
712         int ret;
713
714         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
715                 tw32_f(MAC_MI_MODE,
716                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
717                 udelay(80);
718         }
719
720         *val = 0x0;
721
722         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
723                       MI_COM_PHY_ADDR_MASK);
724         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
725                       MI_COM_REG_ADDR_MASK);
726         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
727
728         tw32_f(MAC_MI_COM, frame_val);
729
730         loops = PHY_BUSY_LOOPS;
731         while (loops != 0) {
732                 udelay(10);
733                 frame_val = tr32(MAC_MI_COM);
734
735                 if ((frame_val & MI_COM_BUSY) == 0) {
736                         udelay(5);
737                         frame_val = tr32(MAC_MI_COM);
738                         break;
739                 }
740                 loops -= 1;
741         }
742
743         ret = -EBUSY;
744         if (loops != 0) {
745                 *val = frame_val & MI_COM_DATA_MASK;
746                 ret = 0;
747         }
748
749         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
750                 tw32_f(MAC_MI_MODE, tp->mi_mode);
751                 udelay(80);
752         }
753
754         return ret;
755 }
756
757 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
758 {
759         u32 frame_val;
760         unsigned int loops;
761         int ret;
762
763         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
764             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
765                 return 0;
766
767         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
768                 tw32_f(MAC_MI_MODE,
769                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
770                 udelay(80);
771         }
772
773         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
774                       MI_COM_PHY_ADDR_MASK);
775         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
776                       MI_COM_REG_ADDR_MASK);
777         frame_val |= (val & MI_COM_DATA_MASK);
778         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
779
780         tw32_f(MAC_MI_COM, frame_val);
781
782         loops = PHY_BUSY_LOOPS;
783         while (loops != 0) {
784                 udelay(10);
785                 frame_val = tr32(MAC_MI_COM);
786                 if ((frame_val & MI_COM_BUSY) == 0) {
787                         udelay(5);
788                         frame_val = tr32(MAC_MI_COM);
789                         break;
790                 }
791                 loops -= 1;
792         }
793
794         ret = -EBUSY;
795         if (loops != 0)
796                 ret = 0;
797
798         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
799                 tw32_f(MAC_MI_MODE, tp->mi_mode);
800                 udelay(80);
801         }
802
803         return ret;
804 }
805
806 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
807 {
808         u32 phy;
809
810         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
811             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
812                 return;
813
814         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
815                 u32 ephy;
816
817                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
818                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
819                                      ephy | MII_TG3_EPHY_SHADOW_EN);
820                         if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
821                                 if (enable)
822                                         phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
823                                 else
824                                         phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
825                                 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
826                         }
827                         tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
828                 }
829         } else {
830                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
831                       MII_TG3_AUXCTL_SHDWSEL_MISC;
832                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
833                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
834                         if (enable)
835                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
836                         else
837                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
838                         phy |= MII_TG3_AUXCTL_MISC_WREN;
839                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
840                 }
841         }
842 }
843
844 static void tg3_phy_set_wirespeed(struct tg3 *tp)
845 {
846         u32 val;
847
848         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
849                 return;
850
851         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
852             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
853                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
854                              (val | (1 << 15) | (1 << 4)));
855 }
856
857 static int tg3_bmcr_reset(struct tg3 *tp)
858 {
859         u32 phy_control;
860         int limit, err;
861
862         /* OK, reset it, and poll the BMCR_RESET bit until it
863          * clears or we time out.
864          */
865         phy_control = BMCR_RESET;
866         err = tg3_writephy(tp, MII_BMCR, phy_control);
867         if (err != 0)
868                 return -EBUSY;
869
870         limit = 5000;
871         while (limit--) {
872                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
873                 if (err != 0)
874                         return -EBUSY;
875
876                 if ((phy_control & BMCR_RESET) == 0) {
877                         udelay(40);
878                         break;
879                 }
880                 udelay(10);
881         }
882         if (limit <= 0)
883                 return -EBUSY;
884
885         return 0;
886 }
887
888 static int tg3_wait_macro_done(struct tg3 *tp)
889 {
890         int limit = 100;
891
892         while (limit--) {
893                 u32 tmp32;
894
895                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
896                         if ((tmp32 & 0x1000) == 0)
897                                 break;
898                 }
899         }
900         if (limit <= 0)
901                 return -EBUSY;
902
903         return 0;
904 }
905
906 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
907 {
908         static const u32 test_pat[4][6] = {
909         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
910         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
911         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
912         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
913         };
914         int chan;
915
916         for (chan = 0; chan < 4; chan++) {
917                 int i;
918
919                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
920                              (chan * 0x2000) | 0x0200);
921                 tg3_writephy(tp, 0x16, 0x0002);
922
923                 for (i = 0; i < 6; i++)
924                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
925                                      test_pat[chan][i]);
926
927                 tg3_writephy(tp, 0x16, 0x0202);
928                 if (tg3_wait_macro_done(tp)) {
929                         *resetp = 1;
930                         return -EBUSY;
931                 }
932
933                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
934                              (chan * 0x2000) | 0x0200);
935                 tg3_writephy(tp, 0x16, 0x0082);
936                 if (tg3_wait_macro_done(tp)) {
937                         *resetp = 1;
938                         return -EBUSY;
939                 }
940
941                 tg3_writephy(tp, 0x16, 0x0802);
942                 if (tg3_wait_macro_done(tp)) {
943                         *resetp = 1;
944                         return -EBUSY;
945                 }
946
947                 for (i = 0; i < 6; i += 2) {
948                         u32 low, high;
949
950                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
951                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
952                             tg3_wait_macro_done(tp)) {
953                                 *resetp = 1;
954                                 return -EBUSY;
955                         }
956                         low &= 0x7fff;
957                         high &= 0x000f;
958                         if (low != test_pat[chan][i] ||
959                             high != test_pat[chan][i+1]) {
960                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
961                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
962                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
963
964                                 return -EBUSY;
965                         }
966                 }
967         }
968
969         return 0;
970 }
971
972 static int tg3_phy_reset_chanpat(struct tg3 *tp)
973 {
974         int chan;
975
976         for (chan = 0; chan < 4; chan++) {
977                 int i;
978
979                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
980                              (chan * 0x2000) | 0x0200);
981                 tg3_writephy(tp, 0x16, 0x0002);
982                 for (i = 0; i < 6; i++)
983                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
984                 tg3_writephy(tp, 0x16, 0x0202);
985                 if (tg3_wait_macro_done(tp))
986                         return -EBUSY;
987         }
988
989         return 0;
990 }
991
992 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
993 {
994         u32 reg32, phy9_orig;
995         int retries, do_phy_reset, err;
996
997         retries = 10;
998         do_phy_reset = 1;
999         do {
1000                 if (do_phy_reset) {
1001                         err = tg3_bmcr_reset(tp);
1002                         if (err)
1003                                 return err;
1004                         do_phy_reset = 0;
1005                 }
1006
1007                 /* Disable transmitter and interrupt.  */
1008                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1009                         continue;
1010
1011                 reg32 |= 0x3000;
1012                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1013
1014                 /* Set full-duplex, 1000 mbps.  */
1015                 tg3_writephy(tp, MII_BMCR,
1016                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1017
1018                 /* Set to master mode.  */
1019                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1020                         continue;
1021
1022                 tg3_writephy(tp, MII_TG3_CTRL,
1023                              (MII_TG3_CTRL_AS_MASTER |
1024                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1025
1026                 /* Enable SM_DSP_CLOCK and 6dB.  */
1027                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1028
1029                 /* Block the PHY control access.  */
1030                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1031                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1032
1033                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1034                 if (!err)
1035                         break;
1036         } while (--retries);
1037
1038         err = tg3_phy_reset_chanpat(tp);
1039         if (err)
1040                 return err;
1041
1042         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1043         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1044
1045         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1046         tg3_writephy(tp, 0x16, 0x0000);
1047
1048         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1049             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1050                 /* Set Extended packet length bit for jumbo frames */
1051                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1052         } else {
1054                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1055         }
1056
1057         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1058
1059         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1060                 reg32 &= ~0x3000;
1061                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1062         } else if (!err)
1063                 err = -EBUSY;
1064
1065         return err;
1066 }
1067
1068 static void tg3_link_report(struct tg3 *);
1069
1070 /* Reset the tigon3 PHY and reapply any chip-specific workarounds
1071  * and settings afterwards.
1072  */
1073 static int tg3_phy_reset(struct tg3 *tp)
1074 {
1075         u32 phy_status;
1076         int err;
1077
1078         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1079                 u32 val;
1080
1081                 val = tr32(GRC_MISC_CFG);
1082                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1083                 udelay(40);
1084         }
1085         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1086         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1087         if (err != 0)
1088                 return -EBUSY;
1089
1090         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1091                 netif_carrier_off(tp->dev);
1092                 tg3_link_report(tp);
1093         }
1094
1095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1096             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1097             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1098                 err = tg3_phy_reset_5703_4_5(tp);
1099                 if (err)
1100                         return err;
1101                 goto out;
1102         }
1103
1104         err = tg3_bmcr_reset(tp);
1105         if (err)
1106                 return err;
1107
1108 out:
1109         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1110                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1111                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1112                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1113                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1114                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1115                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1116         }
1117         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1118                 tg3_writephy(tp, 0x1c, 0x8d68);
1119                 tg3_writephy(tp, 0x1c, 0x8d68);
1120         }
1121         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1122                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1123                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1124                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1125                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1126                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1127                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1128                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1129                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1130         } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1132                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1133                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1134                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1135                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1136                         tg3_writephy(tp, MII_TG3_TEST1,
1137                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1138                 } else
1139                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1140                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1141         }
1142         /* Set Extended packet length bit (bit 14) on all chips that
1143          * support jumbo frames. */
1144         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1145                 /* Cannot do read-modify-write on 5401 */
1146                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1147         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1148                 u32 phy_reg;
1149
1150                 /* Set bit 14 with read-modify-write to preserve other bits */
1151                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1152                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1153                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1154         }
1155
1156         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1157          * jumbo frame transmission.
1158          */
1159         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1160                 u32 phy_reg;
1161
1162                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1163                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1164                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1165         }
1166
1167         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1168                 /* adjust output voltage */
1169                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1170         }
1171
1172         tg3_phy_toggle_automdix(tp, 1);
1173         tg3_phy_set_wirespeed(tp);
1174         return 0;
1175 }
1176
1177 static void tg3_frob_aux_power(struct tg3 *tp)
1178 {
1179         struct tg3 *tp_peer = tp;
1180
1181         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1182                 return;
1183
1184         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1185             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1186                 struct net_device *dev_peer;
1187
1188                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1189                 /* remove_one() may have been run on the peer. */
1190                 if (!dev_peer)
1191                         tp_peer = tp;
1192                 else
1193                         tp_peer = netdev_priv(dev_peer);
1194         }
1195
1196         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1197             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1198             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1199             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1200                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1201                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1202                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1203                                     (GRC_LCLCTRL_GPIO_OE0 |
1204                                      GRC_LCLCTRL_GPIO_OE1 |
1205                                      GRC_LCLCTRL_GPIO_OE2 |
1206                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1207                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1208                                     100);
1209                 } else {
1210                         u32 no_gpio2;
1211                         u32 grc_local_ctrl = 0;
1212
1213                         if (tp_peer != tp &&
1214                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1215                                 return;
1216
1217                         /* Workaround to prevent overdrawing Amps. */
1218                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1219                             ASIC_REV_5714) {
1220                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1221                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1222                                             grc_local_ctrl, 100);
1223                         }
1224
1225                         /* On 5753 and variants, GPIO2 cannot be used. */
1226                         no_gpio2 = tp->nic_sram_data_cfg &
1227                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1228
1229                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1230                                          GRC_LCLCTRL_GPIO_OE1 |
1231                                          GRC_LCLCTRL_GPIO_OE2 |
1232                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1233                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1234                         if (no_gpio2) {
1235                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1236                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1237                         }
1238                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1239                                                     grc_local_ctrl, 100);
1240
1241                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1242
1243                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1244                                                     grc_local_ctrl, 100);
1245
1246                         if (!no_gpio2) {
1247                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1248                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1249                                             grc_local_ctrl, 100);
1250                         }
1251                 }
1252         } else {
1253                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1254                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1255                         if (tp_peer != tp &&
1256                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1257                                 return;
1258
1259                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1260                                     (GRC_LCLCTRL_GPIO_OE1 |
1261                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1262
1263                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1264                                     GRC_LCLCTRL_GPIO_OE1, 100);
1265
1266                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1267                                     (GRC_LCLCTRL_GPIO_OE1 |
1268                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1269                 }
1270         }
1271 }
1272
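/* Editorial note (not part of the original source): tg3_5700_link_polarity()
 * returns nonzero when MAC_MODE_LINK_POLARITY should be set for the given
 * link speed: always in LED mode PHY_2, at any speed other than 10 Mb/s on
 * the BCM5411 PHY, and only at 10 Mb/s otherwise.
 */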
1273 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1274 {
1275         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1276                 return 1;
1277         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1278                 if (speed != SPEED_10)
1279                         return 1;
1280         } else if (speed == SPEED_10)
1281                 return 1;
1282
1283         return 0;
1284 }
1285
1286 static int tg3_setup_phy(struct tg3 *, int);
1287
1288 #define RESET_KIND_SHUTDOWN     0
1289 #define RESET_KIND_INIT         1
1290 #define RESET_KIND_SUSPEND      2
1291
1292 static void tg3_write_sig_post_reset(struct tg3 *, int);
1293 static int tg3_halt_cpu(struct tg3 *, u32);
1294 static int tg3_nvram_lock(struct tg3 *);
1295 static void tg3_nvram_unlock(struct tg3 *);
1296
1297 static void tg3_power_down_phy(struct tg3 *tp)
1298 {
1299         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1300                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1301                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1302                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1303
1304                         sg_dig_ctrl |=
1305                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1306                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1307                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1308                 }
1309                 return;
1310         }
1311
1312         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1313                 u32 val;
1314
1315                 tg3_bmcr_reset(tp);
1316                 val = tr32(GRC_MISC_CFG);
1317                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1318                 udelay(40);
1319                 return;
1320         } else {
1321                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1322                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1323                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1324         }
1325
1326         /* The PHY should not be powered down on some chips because
1327          * of bugs.
1328          */
1329         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1330             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1331             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1332              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1333                 return;
1334         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1335 }
1336
1337 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1338 {
1339         u32 misc_host_ctrl;
1340         u16 power_control, power_caps;
1341         int pm = tp->pm_cap;
1342
1343         /* Make sure register accesses (indirect or otherwise)
1344          * will function correctly.
1345          */
1346         pci_write_config_dword(tp->pdev,
1347                                TG3PCI_MISC_HOST_CTRL,
1348                                tp->misc_host_ctrl);
1349
1350         pci_read_config_word(tp->pdev,
1351                              pm + PCI_PM_CTRL,
1352                              &power_control);
1353         power_control |= PCI_PM_CTRL_PME_STATUS;
1354         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1355         switch (state) {
1356         case PCI_D0:
1357                 power_control |= 0;
1358                 pci_write_config_word(tp->pdev,
1359                                       pm + PCI_PM_CTRL,
1360                                       power_control);
1361                 udelay(100);    /* Delay after power state change */
1362
1363                 /* Switch out of Vaux if it is a NIC */
1364                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1365                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1366
1367                 return 0;
1368
1369         case PCI_D1:
1370                 power_control |= 1;
1371                 break;
1372
1373         case PCI_D2:
1374                 power_control |= 2;
1375                 break;
1376
1377         case PCI_D3hot:
1378                 power_control |= 3;
1379                 break;
1380
1381         default:
1382                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1383                        "requested.\n",
1384                        tp->dev->name, state);
1385                 return -EINVAL;
1386         }
1387
1388         power_control |= PCI_PM_CTRL_PME_ENABLE;
1389
1390         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1391         tw32(TG3PCI_MISC_HOST_CTRL,
1392              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1393
1394         if (tp->link_config.phy_is_low_power == 0) {
1395                 tp->link_config.phy_is_low_power = 1;
1396                 tp->link_config.orig_speed = tp->link_config.speed;
1397                 tp->link_config.orig_duplex = tp->link_config.duplex;
1398                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1399         }
1400
1401         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1402                 tp->link_config.speed = SPEED_10;
1403                 tp->link_config.duplex = DUPLEX_HALF;
1404                 tp->link_config.autoneg = AUTONEG_ENABLE;
1405                 tg3_setup_phy(tp, 0);
1406         }
1407
1408         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1409                 u32 val;
1410
1411                 val = tr32(GRC_VCPU_EXT_CTRL);
1412                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1413         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1414                 int i;
1415                 u32 val;
1416
1417                 for (i = 0; i < 200; i++) {
1418                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1419                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1420                                 break;
1421                         msleep(1);
1422                 }
1423         }
1424         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1425                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1426                                                      WOL_DRV_STATE_SHUTDOWN |
1427                                                      WOL_DRV_WOL |
1428                                                      WOL_SET_MAGIC_PKT);
1429
1430         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1431
1432         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1433                 u32 mac_mode;
1434
1435                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1436                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1437                         udelay(40);
1438
1439                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1440                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1441                         else
1442                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1443
1444                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1445                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1446                             ASIC_REV_5700) {
1447                                 u32 speed = (tp->tg3_flags &
1448                                              TG3_FLAG_WOL_SPEED_100MB) ?
1449                                              SPEED_100 : SPEED_10;
1450                                 if (tg3_5700_link_polarity(tp, speed))
1451                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1452                                 else
1453                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1454                         }
1455                 } else {
1456                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1457                 }
1458
1459                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1460                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1461
1462                 if (power_caps & PCI_PM_CAP_PME_D3cold)
1464                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1465
1466                 tw32_f(MAC_MODE, mac_mode);
1467                 udelay(100);
1468
1469                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1470                 udelay(10);
1471         }
1472
1473         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1474             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1475              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1476                 u32 base_val;
1477
1478                 base_val = tp->pci_clock_ctrl;
1479                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1480                              CLOCK_CTRL_TXCLK_DISABLE);
1481
1482                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1483                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1484         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1485                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1486                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1487                 /* do nothing */
1488         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1489                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1490                 u32 newbits1, newbits2;
1491
1492                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1493                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1494                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1495                                     CLOCK_CTRL_TXCLK_DISABLE |
1496                                     CLOCK_CTRL_ALTCLK);
1497                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1498                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1499                         newbits1 = CLOCK_CTRL_625_CORE;
1500                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1501                 } else {
1502                         newbits1 = CLOCK_CTRL_ALTCLK;
1503                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1504                 }
1505
1506                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1507                             40);
1508
1509                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1510                             40);
1511
1512                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1513                         u32 newbits3;
1514
1515                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1516                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1517                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1518                                             CLOCK_CTRL_TXCLK_DISABLE |
1519                                             CLOCK_CTRL_44MHZ_CORE);
1520                         } else {
1521                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1522                         }
1523
1524                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1525                                     tp->pci_clock_ctrl | newbits3, 40);
1526                 }
1527         }
1528
1529         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1530             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1531             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1532                 tg3_power_down_phy(tp);
1533
1534         tg3_frob_aux_power(tp);
1535
1536         /* Workaround for unstable PLL clock */
1537         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1538             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1539                 u32 val = tr32(0x7d00);
1540
1541                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1542                 tw32(0x7d00, val);
1543                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1544                         int err;
1545
1546                         err = tg3_nvram_lock(tp);
1547                         tg3_halt_cpu(tp, RX_CPU_BASE);
1548                         if (!err)
1549                                 tg3_nvram_unlock(tp);
1550                 }
1551         }
1552
1553         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1554
1555         /* Finally, set the new power state. */
1556         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1557         udelay(100);    /* Delay after power state change */
1558
1559         return 0;
1560 }
1561
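/* Log the current link state (speed, duplex and flow control) when
 * NETIF_MSG_LINK messages are enabled.
 */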
1562 static void tg3_link_report(struct tg3 *tp)
1563 {
1564         if (!netif_carrier_ok(tp->dev)) {
1565                 if (netif_msg_link(tp))
1566                         printk(KERN_INFO PFX "%s: Link is down.\n",
1567                                tp->dev->name);
1568         } else if (netif_msg_link(tp)) {
1569                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1570                        tp->dev->name,
1571                        (tp->link_config.active_speed == SPEED_1000 ?
1572                         1000 :
1573                         (tp->link_config.active_speed == SPEED_100 ?
1574                          100 : 10)),
1575                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1576                         "full" : "half"));
1577
1578                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1579                        "%s for RX.\n",
1580                        tp->dev->name,
1581                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1582                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1583         }
1584 }
1585
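/* Resolve 802.3x flow control from the local and remote advertisement
 * words.  When pause autonegotiation is enabled, 1000BASE-X pause bits
 * are first mapped onto their 1000BASE-T equivalents for MII SERDES
 * parts; the resulting RX/TX pause flags are then reflected into
 * MAC_RX_MODE/MAC_TX_MODE, but only when either register actually
 * changes.
 */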
1586 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1587 {
1588         u32 new_tg3_flags = 0;
1589         u32 old_rx_mode = tp->rx_mode;
1590         u32 old_tx_mode = tp->tx_mode;
1591
1592         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1593
1594                 /* Convert 1000BaseX flow control bits to 1000BaseT
1595                  * bits before resolving flow control.
1596                  */
1597                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1598                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1599                                        ADVERTISE_PAUSE_ASYM);
1600                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1601
1602                         if (local_adv & ADVERTISE_1000XPAUSE)
1603                                 local_adv |= ADVERTISE_PAUSE_CAP;
1604                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1605                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1606                         if (remote_adv & LPA_1000XPAUSE)
1607                                 remote_adv |= LPA_PAUSE_CAP;
1608                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1609                                 remote_adv |= LPA_PAUSE_ASYM;
1610                 }
1611
1612                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1613                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1614                                 if (remote_adv & LPA_PAUSE_CAP)
1615                                         new_tg3_flags |=
1616                                                 (TG3_FLAG_RX_PAUSE |
1617                                                 TG3_FLAG_TX_PAUSE);
1618                                 else if (remote_adv & LPA_PAUSE_ASYM)
1619                                         new_tg3_flags |=
1620                                                 (TG3_FLAG_RX_PAUSE);
1621                         } else {
1622                                 if (remote_adv & LPA_PAUSE_CAP)
1623                                         new_tg3_flags |=
1624                                                 (TG3_FLAG_RX_PAUSE |
1625                                                 TG3_FLAG_TX_PAUSE);
1626                         }
1627                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1628                         if ((remote_adv & LPA_PAUSE_CAP) &&
1629                             (remote_adv & LPA_PAUSE_ASYM))
1630                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1631                 }
1632
1633                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1634                 tp->tg3_flags |= new_tg3_flags;
1635         } else {
1636                 new_tg3_flags = tp->tg3_flags;
1637         }
1638
1639         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1640                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1641         else
1642                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1643
1644         if (old_rx_mode != tp->rx_mode) {
1645                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1646         }
1647
1648         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1649                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1650         else
1651                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1652
1653         if (old_tx_mode != tp->tx_mode) {
1654                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1655         }
1656 }
1657
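/* Decode the speed/duplex field of the PHY auxiliary status register.
 * The 10/100-only 5906 reports speed and duplex in separate bits and
 * is handled in the default case.
 */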
1658 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1659 {
1660         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1661         case MII_TG3_AUX_STAT_10HALF:
1662                 *speed = SPEED_10;
1663                 *duplex = DUPLEX_HALF;
1664                 break;
1665
1666         case MII_TG3_AUX_STAT_10FULL:
1667                 *speed = SPEED_10;
1668                 *duplex = DUPLEX_FULL;
1669                 break;
1670
1671         case MII_TG3_AUX_STAT_100HALF:
1672                 *speed = SPEED_100;
1673                 *duplex = DUPLEX_HALF;
1674                 break;
1675
1676         case MII_TG3_AUX_STAT_100FULL:
1677                 *speed = SPEED_100;
1678                 *duplex = DUPLEX_FULL;
1679                 break;
1680
1681         case MII_TG3_AUX_STAT_1000HALF:
1682                 *speed = SPEED_1000;
1683                 *duplex = DUPLEX_HALF;
1684                 break;
1685
1686         case MII_TG3_AUX_STAT_1000FULL:
1687                 *speed = SPEED_1000;
1688                 *duplex = DUPLEX_FULL;
1689                 break;
1690
1691         default:
1692                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1693                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1694                                  SPEED_10;
1695                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1696                                   DUPLEX_HALF;
1697                         break;
1698                 }
1699                 *speed = SPEED_INVALID;
1700                 *duplex = DUPLEX_INVALID;
1701                 break;
1702         }
1703 }
1704
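/* Program the copper PHY advertisement and control registers from
 * tp->link_config: low-power mode advertises 10Mb (plus 100Mb if
 * needed for WoL), autonegotiation advertises whatever
 * link_config.advertising allows, and a forced mode is written
 * directly to BMCR after briefly looping back the PHY so the old
 * link drops first.
 */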
1705 static void tg3_phy_copper_begin(struct tg3 *tp)
1706 {
1707         u32 new_adv;
1708         int i;
1709
1710         if (tp->link_config.phy_is_low_power) {
1711                 /* Entering low power mode.  Disable gigabit and
1712                  * 100baseT advertisements.
1713                  */
1714                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1715
1716                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1717                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1718                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1719                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1720
1721                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1722         } else if (tp->link_config.speed == SPEED_INVALID) {
1723                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1724                         tp->link_config.advertising &=
1725                                 ~(ADVERTISED_1000baseT_Half |
1726                                   ADVERTISED_1000baseT_Full);
1727
1728                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1729                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1730                         new_adv |= ADVERTISE_10HALF;
1731                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1732                         new_adv |= ADVERTISE_10FULL;
1733                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1734                         new_adv |= ADVERTISE_100HALF;
1735                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1736                         new_adv |= ADVERTISE_100FULL;
1737                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1738
1739                 if (tp->link_config.advertising &
1740                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1741                         new_adv = 0;
1742                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1743                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1744                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1745                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1746                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1747                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1748                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1749                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1750                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1751                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1752                 } else {
1753                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1754                 }
1755         } else {
1756                 /* Asking for a specific link mode. */
1757                 if (tp->link_config.speed == SPEED_1000) {
1758                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1759                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1760
1761                         if (tp->link_config.duplex == DUPLEX_FULL)
1762                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1763                         else
1764                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1765                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1766                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1767                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1768                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1769                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1770                 } else {
1771                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1772
1773                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1774                         if (tp->link_config.speed == SPEED_100) {
1775                                 if (tp->link_config.duplex == DUPLEX_FULL)
1776                                         new_adv |= ADVERTISE_100FULL;
1777                                 else
1778                                         new_adv |= ADVERTISE_100HALF;
1779                         } else {
1780                                 if (tp->link_config.duplex == DUPLEX_FULL)
1781                                         new_adv |= ADVERTISE_10FULL;
1782                                 else
1783                                         new_adv |= ADVERTISE_10HALF;
1784                         }
1785                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1786                 }
1787         }
1788
1789         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1790             tp->link_config.speed != SPEED_INVALID) {
1791                 u32 bmcr, orig_bmcr;
1792
1793                 tp->link_config.active_speed = tp->link_config.speed;
1794                 tp->link_config.active_duplex = tp->link_config.duplex;
1795
1796                 bmcr = 0;
1797                 switch (tp->link_config.speed) {
1798                 default:
1799                 case SPEED_10:
1800                         break;
1801
1802                 case SPEED_100:
1803                         bmcr |= BMCR_SPEED100;
1804                         break;
1805
1806                 case SPEED_1000:
1807                         bmcr |= TG3_BMCR_SPEED1000;
1808                         break;
1809                 }
1810
1811                 if (tp->link_config.duplex == DUPLEX_FULL)
1812                         bmcr |= BMCR_FULLDPLX;
1813
1814                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1815                     (bmcr != orig_bmcr)) {
1816                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1817                         for (i = 0; i < 1500; i++) {
1818                                 u32 tmp;
1819
1820                                 udelay(10);
1821                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1822                                     tg3_readphy(tp, MII_BMSR, &tmp))
1823                                         continue;
1824                                 if (!(tmp & BMSR_LSTATUS)) {
1825                                         udelay(40);
1826                                         break;
1827                                 }
1828                         }
1829                         tg3_writephy(tp, MII_BMCR, bmcr);
1830                         udelay(40);
1831                 }
1832         } else {
1833                 tg3_writephy(tp, MII_BMCR,
1834                              BMCR_ANENABLE | BMCR_ANRESTART);
1835         }
1836 }
1837
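/* BCM5401 DSP setup: a fixed sequence of indirect writes through
 * MII_TG3_DSP_ADDRESS/MII_TG3_DSP_RW_PORT that turns off tap power
 * management and sets the extended packet length bit.  Returns
 * nonzero if any of the PHY writes failed.
 */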
1838 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1839 {
1840         int err;
1841
1842         /* Turn off tap power management. */
1843         /* Set Extended packet length bit */
1844         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1845
1846         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1847         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1848
1849         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1850         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1851
1852         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1853         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1854
1855         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1856         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1857
1858         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1859         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1860
1861         udelay(40);
1862
1863         return err;
1864 }
1865
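/* Return 1 if the PHY advertisement registers already cover every
 * mode requested in the mask (MII_ADVERTISE for 10/100, MII_TG3_CTRL
 * for gigabit), 0 otherwise or on a register read failure.
 */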
1866 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1867 {
1868         u32 adv_reg, all_mask = 0;
1869
1870         if (mask & ADVERTISED_10baseT_Half)
1871                 all_mask |= ADVERTISE_10HALF;
1872         if (mask & ADVERTISED_10baseT_Full)
1873                 all_mask |= ADVERTISE_10FULL;
1874         if (mask & ADVERTISED_100baseT_Half)
1875                 all_mask |= ADVERTISE_100HALF;
1876         if (mask & ADVERTISED_100baseT_Full)
1877                 all_mask |= ADVERTISE_100FULL;
1878
1879         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1880                 return 0;
1881
1882         if ((adv_reg & all_mask) != all_mask)
1883                 return 0;
1884         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1885                 u32 tg3_ctrl;
1886
1887                 all_mask = 0;
1888                 if (mask & ADVERTISED_1000baseT_Half)
1889                         all_mask |= ADVERTISE_1000HALF;
1890                 if (mask & ADVERTISED_1000baseT_Full)
1891                         all_mask |= ADVERTISE_1000FULL;
1892
1893                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1894                         return 0;
1895
1896                 if ((tg3_ctrl & all_mask) != all_mask)
1897                         return 0;
1898         }
1899         return 1;
1900 }
1901
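/* Bring up (or re-check) the link on a copper PHY: clear stale MAC
 * status, apply the 5401/5701 PHY workarounds, reset the PHY if asked
 * or if a flaky third-party PHY lost link, poll BMSR and the aux
 * status register for the negotiated speed/duplex, resolve flow
 * control on full-duplex autoneg links, reprogram MAC_MODE to match,
 * and report any carrier change.
 */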
1902 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1903 {
1904         int current_link_up;
1905         u32 bmsr, dummy;
1906         u16 current_speed;
1907         u8 current_duplex;
1908         int i, err;
1909
1910         tw32(MAC_EVENT, 0);
1911
1912         tw32_f(MAC_STATUS,
1913              (MAC_STATUS_SYNC_CHANGED |
1914               MAC_STATUS_CFG_CHANGED |
1915               MAC_STATUS_MI_COMPLETION |
1916               MAC_STATUS_LNKSTATE_CHANGED));
1917         udelay(40);
1918
1919         tp->mi_mode = MAC_MI_MODE_BASE;
1920         tw32_f(MAC_MI_MODE, tp->mi_mode);
1921         udelay(80);
1922
1923         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1924
1925         /* Some third-party PHYs need to be reset on link going
1926          * down.
1927          */
1928         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1929              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1930              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1931             netif_carrier_ok(tp->dev)) {
1932                 tg3_readphy(tp, MII_BMSR, &bmsr);
1933                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1934                     !(bmsr & BMSR_LSTATUS))
1935                         force_reset = 1;
1936         }
1937         if (force_reset)
1938                 tg3_phy_reset(tp);
1939
1940         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1941                 tg3_readphy(tp, MII_BMSR, &bmsr);
1942                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1943                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1944                         bmsr = 0;
1945
1946                 if (!(bmsr & BMSR_LSTATUS)) {
1947                         err = tg3_init_5401phy_dsp(tp);
1948                         if (err)
1949                                 return err;
1950
1951                         tg3_readphy(tp, MII_BMSR, &bmsr);
1952                         for (i = 0; i < 1000; i++) {
1953                                 udelay(10);
1954                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1955                                     (bmsr & BMSR_LSTATUS)) {
1956                                         udelay(40);
1957                                         break;
1958                                 }
1959                         }
1960
1961                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1962                             !(bmsr & BMSR_LSTATUS) &&
1963                             tp->link_config.active_speed == SPEED_1000) {
1964                                 err = tg3_phy_reset(tp);
1965                                 if (!err)
1966                                         err = tg3_init_5401phy_dsp(tp);
1967                                 if (err)
1968                                         return err;
1969                         }
1970                 }
1971         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1972                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1973                 /* 5701 {A0,B0} CRC bug workaround */
1974                 tg3_writephy(tp, 0x15, 0x0a75);
1975                 tg3_writephy(tp, 0x1c, 0x8c68);
1976                 tg3_writephy(tp, 0x1c, 0x8d68);
1977                 tg3_writephy(tp, 0x1c, 0x8c68);
1978         }
1979
1980         /* Clear pending interrupts... */
1981         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1982         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1983
1984         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1985                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1986         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1987                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1988
1989         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1990             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1991                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1992                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1993                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1994                 else
1995                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1996         }
1997
1998         current_link_up = 0;
1999         current_speed = SPEED_INVALID;
2000         current_duplex = DUPLEX_INVALID;
2001
2002         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2003                 u32 val;
2004
2005                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2006                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2007                 if (!(val & (1 << 10))) {
2008                         val |= (1 << 10);
2009                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2010                         goto relink;
2011                 }
2012         }
2013
2014         bmsr = 0;
2015         for (i = 0; i < 100; i++) {
2016                 tg3_readphy(tp, MII_BMSR, &bmsr);
2017                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2018                     (bmsr & BMSR_LSTATUS))
2019                         break;
2020                 udelay(40);
2021         }
2022
2023         if (bmsr & BMSR_LSTATUS) {
2024                 u32 aux_stat, bmcr;
2025
2026                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2027                 for (i = 0; i < 2000; i++) {
2028                         udelay(10);
2029                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2030                             aux_stat)
2031                                 break;
2032                 }
2033
2034                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2035                                              &current_speed,
2036                                              &current_duplex);
2037
2038                 bmcr = 0;
2039                 for (i = 0; i < 200; i++) {
2040                         tg3_readphy(tp, MII_BMCR, &bmcr);
2041                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2042                                 continue;
2043                         if (bmcr && bmcr != 0x7fff)
2044                                 break;
2045                         udelay(10);
2046                 }
2047
2048                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2049                         if (bmcr & BMCR_ANENABLE) {
2050                                 current_link_up = 1;
2051
2052                                 /* Force autoneg restart if we are exiting
2053                                  * low power mode.
2054                                  */
2055                                 if (!tg3_copper_is_advertising_all(tp,
2056                                                 tp->link_config.advertising))
2057                                         current_link_up = 0;
2058                         } else {
2059                                 current_link_up = 0;
2060                         }
2061                 } else {
2062                         if (!(bmcr & BMCR_ANENABLE) &&
2063                             tp->link_config.speed == current_speed &&
2064                             tp->link_config.duplex == current_duplex) {
2065                                 current_link_up = 1;
2066                         } else {
2067                                 current_link_up = 0;
2068                         }
2069                 }
2070
2071                 tp->link_config.active_speed = current_speed;
2072                 tp->link_config.active_duplex = current_duplex;
2073         }
2074
2075         if (current_link_up == 1 &&
2076             (tp->link_config.active_duplex == DUPLEX_FULL) &&
2077             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2078                 u32 local_adv, remote_adv;
2079
2080                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2081                         local_adv = 0;
2082                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2083
2084                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2085                         remote_adv = 0;
2086
2087                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2088
2089                 /* If we are not advertising full pause capability,
2090                  * something is wrong.  Bring the link down and reconfigure.
2091                  */
2092                 if (local_adv != ADVERTISE_PAUSE_CAP) {
2093                         current_link_up = 0;
2094                 } else {
2095                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2096                 }
2097         }
2098 relink:
2099         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2100                 u32 tmp;
2101
2102                 tg3_phy_copper_begin(tp);
2103
2104                 tg3_readphy(tp, MII_BMSR, &tmp);
2105                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2106                     (tmp & BMSR_LSTATUS))
2107                         current_link_up = 1;
2108         }
2109
2110         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2111         if (current_link_up == 1) {
2112                 if (tp->link_config.active_speed == SPEED_100 ||
2113                     tp->link_config.active_speed == SPEED_10)
2114                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2115                 else
2116                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2117         } else
2118                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2119
2120         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2121         if (tp->link_config.active_duplex == DUPLEX_HALF)
2122                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2123
2124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2125                 if (current_link_up == 1 &&
2126                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2127                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2128                 else
2129                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2130         }
2131
2132         /* ??? Without this setting Netgear GA302T PHY does not
2133          * ??? send/receive packets...
2134          */
2135         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2136             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2137                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2138                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2139                 udelay(80);
2140         }
2141
2142         tw32_f(MAC_MODE, tp->mac_mode);
2143         udelay(40);
2144
2145         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2146                 /* Polled via timer. */
2147                 tw32_f(MAC_EVENT, 0);
2148         } else {
2149                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2150         }
2151         udelay(40);
2152
2153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2154             current_link_up == 1 &&
2155             tp->link_config.active_speed == SPEED_1000 &&
2156             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2157              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2158                 udelay(120);
2159                 tw32_f(MAC_STATUS,
2160                      (MAC_STATUS_SYNC_CHANGED |
2161                       MAC_STATUS_CFG_CHANGED));
2162                 udelay(40);
2163                 tg3_write_mem(tp,
2164                               NIC_SRAM_FIRMWARE_MBOX,
2165                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2166         }
2167
2168         if (current_link_up != netif_carrier_ok(tp->dev)) {
2169                 if (current_link_up)
2170                         netif_carrier_on(tp->dev);
2171                 else
2172                         netif_carrier_off(tp->dev);
2173                 tg3_link_report(tp);
2174         }
2175
2176         return 0;
2177 }
2178
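/* Context for the software 1000BASE-X autonegotiation state machine
 * used on fiber boards without working hardware autoneg.  The state
 * and MR_* flag values are modelled on the IEEE 802.3 clause 37
 * arbitration process; the ANEG_CFG_* bits decode the received /C/
 * ordered-set configuration word latched in MAC_RX_AUTO_NEG.
 */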
2179 struct tg3_fiber_aneginfo {
2180         int state;
2181 #define ANEG_STATE_UNKNOWN              0
2182 #define ANEG_STATE_AN_ENABLE            1
2183 #define ANEG_STATE_RESTART_INIT         2
2184 #define ANEG_STATE_RESTART              3
2185 #define ANEG_STATE_DISABLE_LINK_OK      4
2186 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2187 #define ANEG_STATE_ABILITY_DETECT       6
2188 #define ANEG_STATE_ACK_DETECT_INIT      7
2189 #define ANEG_STATE_ACK_DETECT           8
2190 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2191 #define ANEG_STATE_COMPLETE_ACK         10
2192 #define ANEG_STATE_IDLE_DETECT_INIT     11
2193 #define ANEG_STATE_IDLE_DETECT          12
2194 #define ANEG_STATE_LINK_OK              13
2195 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2196 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2197
2198         u32 flags;
2199 #define MR_AN_ENABLE            0x00000001
2200 #define MR_RESTART_AN           0x00000002
2201 #define MR_AN_COMPLETE          0x00000004
2202 #define MR_PAGE_RX              0x00000008
2203 #define MR_NP_LOADED            0x00000010
2204 #define MR_TOGGLE_TX            0x00000020
2205 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2206 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2207 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2208 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2209 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2210 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2211 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2212 #define MR_TOGGLE_RX            0x00002000
2213 #define MR_NP_RX                0x00004000
2214
2215 #define MR_LINK_OK              0x80000000
2216
2217         unsigned long link_time, cur_time;
2218
2219         u32 ability_match_cfg;
2220         int ability_match_count;
2221
2222         char ability_match, idle_match, ack_match;
2223
2224         u32 txconfig, rxconfig;
2225 #define ANEG_CFG_NP             0x00000080
2226 #define ANEG_CFG_ACK            0x00000040
2227 #define ANEG_CFG_RF2            0x00000020
2228 #define ANEG_CFG_RF1            0x00000010
2229 #define ANEG_CFG_PS2            0x00000001
2230 #define ANEG_CFG_PS1            0x00008000
2231 #define ANEG_CFG_HD             0x00004000
2232 #define ANEG_CFG_FD             0x00002000
2233 #define ANEG_CFG_INVAL          0x00001f06
2234
2235 };
2236 #define ANEG_OK         0
2237 #define ANEG_DONE       1
2238 #define ANEG_TIMER_ENAB 2
2239 #define ANEG_FAILED     -1
2240
2241 #define ANEG_STATE_SETTLE_TIME  10000
2242
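/* Advance the fiber autoneg state machine by one tick.  Returns
 * ANEG_OK or ANEG_TIMER_ENAB while negotiation is still in progress,
 * ANEG_DONE on completion and ANEG_FAILED on error; the caller polls
 * this until it finishes.
 */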
2243 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2244                                    struct tg3_fiber_aneginfo *ap)
2245 {
2246         unsigned long delta;
2247         u32 rx_cfg_reg;
2248         int ret;
2249
2250         if (ap->state == ANEG_STATE_UNKNOWN) {
2251                 ap->rxconfig = 0;
2252                 ap->link_time = 0;
2253                 ap->cur_time = 0;
2254                 ap->ability_match_cfg = 0;
2255                 ap->ability_match_count = 0;
2256                 ap->ability_match = 0;
2257                 ap->idle_match = 0;
2258                 ap->ack_match = 0;
2259         }
2260         ap->cur_time++;
2261
2262         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2263                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2264
2265                 if (rx_cfg_reg != ap->ability_match_cfg) {
2266                         ap->ability_match_cfg = rx_cfg_reg;
2267                         ap->ability_match = 0;
2268                         ap->ability_match_count = 0;
2269                 } else {
2270                         if (++ap->ability_match_count > 1) {
2271                                 ap->ability_match = 1;
2272                                 ap->ability_match_cfg = rx_cfg_reg;
2273                         }
2274                 }
2275                 if (rx_cfg_reg & ANEG_CFG_ACK)
2276                         ap->ack_match = 1;
2277                 else
2278                         ap->ack_match = 0;
2279
2280                 ap->idle_match = 0;
2281         } else {
2282                 ap->idle_match = 1;
2283                 ap->ability_match_cfg = 0;
2284                 ap->ability_match_count = 0;
2285                 ap->ability_match = 0;
2286                 ap->ack_match = 0;
2287
2288                 rx_cfg_reg = 0;
2289         }
2290
2291         ap->rxconfig = rx_cfg_reg;
2292         ret = ANEG_OK;
2293
2294         switch (ap->state) {
2295         case ANEG_STATE_UNKNOWN:
2296                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2297                         ap->state = ANEG_STATE_AN_ENABLE;
2298
2299                 /* fallthru */
2300         case ANEG_STATE_AN_ENABLE:
2301                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2302                 if (ap->flags & MR_AN_ENABLE) {
2303                         ap->link_time = 0;
2304                         ap->cur_time = 0;
2305                         ap->ability_match_cfg = 0;
2306                         ap->ability_match_count = 0;
2307                         ap->ability_match = 0;
2308                         ap->idle_match = 0;
2309                         ap->ack_match = 0;
2310
2311                         ap->state = ANEG_STATE_RESTART_INIT;
2312                 } else {
2313                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2314                 }
2315                 break;
2316
2317         case ANEG_STATE_RESTART_INIT:
2318                 ap->link_time = ap->cur_time;
2319                 ap->flags &= ~(MR_NP_LOADED);
2320                 ap->txconfig = 0;
2321                 tw32(MAC_TX_AUTO_NEG, 0);
2322                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2323                 tw32_f(MAC_MODE, tp->mac_mode);
2324                 udelay(40);
2325
2326                 ret = ANEG_TIMER_ENAB;
2327                 ap->state = ANEG_STATE_RESTART;
2328
2329                 /* fallthru */
2330         case ANEG_STATE_RESTART:
2331                 delta = ap->cur_time - ap->link_time;
2332                 if (delta > ANEG_STATE_SETTLE_TIME) {
2333                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2334                 } else {
2335                         ret = ANEG_TIMER_ENAB;
2336                 }
2337                 break;
2338
2339         case ANEG_STATE_DISABLE_LINK_OK:
2340                 ret = ANEG_DONE;
2341                 break;
2342
2343         case ANEG_STATE_ABILITY_DETECT_INIT:
2344                 ap->flags &= ~(MR_TOGGLE_TX);
2345                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2346                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2347                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2348                 tw32_f(MAC_MODE, tp->mac_mode);
2349                 udelay(40);
2350
2351                 ap->state = ANEG_STATE_ABILITY_DETECT;
2352                 break;
2353
2354         case ANEG_STATE_ABILITY_DETECT:
2355                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2356                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2357                 }
2358                 break;
2359
2360         case ANEG_STATE_ACK_DETECT_INIT:
2361                 ap->txconfig |= ANEG_CFG_ACK;
2362                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2363                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2364                 tw32_f(MAC_MODE, tp->mac_mode);
2365                 udelay(40);
2366
2367                 ap->state = ANEG_STATE_ACK_DETECT;
2368
2369                 /* fallthru */
2370         case ANEG_STATE_ACK_DETECT:
2371                 if (ap->ack_match != 0) {
2372                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2373                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2374                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2375                         } else {
2376                                 ap->state = ANEG_STATE_AN_ENABLE;
2377                         }
2378                 } else if (ap->ability_match != 0 &&
2379                            ap->rxconfig == 0) {
2380                         ap->state = ANEG_STATE_AN_ENABLE;
2381                 }
2382                 break;
2383
2384         case ANEG_STATE_COMPLETE_ACK_INIT:
2385                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2386                         ret = ANEG_FAILED;
2387                         break;
2388                 }
2389                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2390                                MR_LP_ADV_HALF_DUPLEX |
2391                                MR_LP_ADV_SYM_PAUSE |
2392                                MR_LP_ADV_ASYM_PAUSE |
2393                                MR_LP_ADV_REMOTE_FAULT1 |
2394                                MR_LP_ADV_REMOTE_FAULT2 |
2395                                MR_LP_ADV_NEXT_PAGE |
2396                                MR_TOGGLE_RX |
2397                                MR_NP_RX);
2398                 if (ap->rxconfig & ANEG_CFG_FD)
2399                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2400                 if (ap->rxconfig & ANEG_CFG_HD)
2401                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2402                 if (ap->rxconfig & ANEG_CFG_PS1)
2403                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2404                 if (ap->rxconfig & ANEG_CFG_PS2)
2405                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2406                 if (ap->rxconfig & ANEG_CFG_RF1)
2407                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2408                 if (ap->rxconfig & ANEG_CFG_RF2)
2409                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2410                 if (ap->rxconfig & ANEG_CFG_NP)
2411                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2412
2413                 ap->link_time = ap->cur_time;
2414
2415                 ap->flags ^= (MR_TOGGLE_TX);
2416                 if (ap->rxconfig & 0x0008)
2417                         ap->flags |= MR_TOGGLE_RX;
2418                 if (ap->rxconfig & ANEG_CFG_NP)
2419                         ap->flags |= MR_NP_RX;
2420                 ap->flags |= MR_PAGE_RX;
2421
2422                 ap->state = ANEG_STATE_COMPLETE_ACK;
2423                 ret = ANEG_TIMER_ENAB;
2424                 break;
2425
2426         case ANEG_STATE_COMPLETE_ACK:
2427                 if (ap->ability_match != 0 &&
2428                     ap->rxconfig == 0) {
2429                         ap->state = ANEG_STATE_AN_ENABLE;
2430                         break;
2431                 }
2432                 delta = ap->cur_time - ap->link_time;
2433                 if (delta > ANEG_STATE_SETTLE_TIME) {
2434                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2435                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2436                         } else {
2437                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2438                                     !(ap->flags & MR_NP_RX)) {
2439                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2440                                 } else {
2441                                         ret = ANEG_FAILED;
2442                                 }
2443                         }
2444                 }
2445                 break;
2446
2447         case ANEG_STATE_IDLE_DETECT_INIT:
2448                 ap->link_time = ap->cur_time;
2449                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2450                 tw32_f(MAC_MODE, tp->mac_mode);
2451                 udelay(40);
2452
2453                 ap->state = ANEG_STATE_IDLE_DETECT;
2454                 ret = ANEG_TIMER_ENAB;
2455                 break;
2456
2457         case ANEG_STATE_IDLE_DETECT:
2458                 if (ap->ability_match != 0 &&
2459                     ap->rxconfig == 0) {
2460                         ap->state = ANEG_STATE_AN_ENABLE;
2461                         break;
2462                 }
2463                 delta = ap->cur_time - ap->link_time;
2464                 if (delta > ANEG_STATE_SETTLE_TIME) {
2465                         /* XXX another gem from the Broadcom driver :( */
2466                         ap->state = ANEG_STATE_LINK_OK;
2467                 }
2468                 break;
2469
2470         case ANEG_STATE_LINK_OK:
2471                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2472                 ret = ANEG_DONE;
2473                 break;
2474
2475         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2476                 /* ??? unimplemented */
2477                 break;
2478
2479         case ANEG_STATE_NEXT_PAGE_WAIT:
2480                 /* ??? unimplemented */
2481                 break;
2482
2483         default:
2484                 ret = ANEG_FAILED;
2485                 break;
2486         }
2487
2488         return ret;
2489 }
2490
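/* Run the software autoneg state machine: clear MAC_TX_AUTO_NEG,
 * force GMII port mode with config-word transmission enabled, then
 * step the state machine in 1us ticks for up to ~195ms.  Returns 1
 * when autoneg finishes successfully and stores the resulting MR_*
 * flags in *flags.
 */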
2491 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2492 {
2493         int res = 0;
2494         struct tg3_fiber_aneginfo aninfo;
2495         int status = ANEG_FAILED;
2496         unsigned int tick;
2497         u32 tmp;
2498
2499         tw32_f(MAC_TX_AUTO_NEG, 0);
2500
2501         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2502         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2503         udelay(40);
2504
2505         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2506         udelay(40);
2507
2508         memset(&aninfo, 0, sizeof(aninfo));
2509         aninfo.flags |= MR_AN_ENABLE;
2510         aninfo.state = ANEG_STATE_UNKNOWN;
2511         aninfo.cur_time = 0;
2512         tick = 0;
2513         while (++tick < 195000) {
2514                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2515                 if (status == ANEG_DONE || status == ANEG_FAILED)
2516                         break;
2517
2518                 udelay(1);
2519         }
2520
2521         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2522         tw32_f(MAC_MODE, tp->mac_mode);
2523         udelay(40);
2524
2525         *flags = aninfo.flags;
2526
2527         if (status == ANEG_DONE &&
2528             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2529                              MR_LP_ADV_FULL_DUPLEX)))
2530                 res = 1;
2531
2532         return res;
2533 }
2534
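/* Bring up the external BCM8002 SerDes PHY through its
 * vendor-specific registers: set the PLL lock range, soft-reset,
 * enable auto-lock and comma detect, toggle POR and wait for the
 * signal to stabilize.  The sequence only runs on first-time init or
 * when PCS sync is present.
 */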
2535 static void tg3_init_bcm8002(struct tg3 *tp)
2536 {
2537         u32 mac_status = tr32(MAC_STATUS);
2538         int i;
2539
2540         /* Reset when initializing for the first time or when we have a link. */
2541         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2542             !(mac_status & MAC_STATUS_PCS_SYNCED))
2543                 return;
2544
2545         /* Set PLL lock range. */
2546         tg3_writephy(tp, 0x16, 0x8007);
2547
2548         /* SW reset */
2549         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2550
2551         /* Wait for reset to complete. */
2552         /* XXX schedule_timeout() ... */
2553         for (i = 0; i < 500; i++)
2554                 udelay(10);
2555
2556         /* Config mode; select PMA/Ch 1 regs. */
2557         tg3_writephy(tp, 0x10, 0x8411);
2558
2559         /* Enable auto-lock and comdet, select txclk for tx. */
2560         tg3_writephy(tp, 0x11, 0x0a10);
2561
2562         tg3_writephy(tp, 0x18, 0x00a0);
2563         tg3_writephy(tp, 0x16, 0x41ff);
2564
2565         /* Assert and deassert POR. */
2566         tg3_writephy(tp, 0x13, 0x0400);
2567         udelay(40);
2568         tg3_writephy(tp, 0x13, 0x0000);
2569
2570         tg3_writephy(tp, 0x11, 0x0a50);
2571         udelay(40);
2572         tg3_writephy(tp, 0x11, 0x0a10);
2573
2574         /* Wait for signal to stabilize */
2575         /* XXX schedule_timeout() ... */
2576         for (i = 0; i < 15000; i++)
2577                 udelay(10);
2578
2579         /* Deselect the channel register so we can read the PHYID
2580          * later.
2581          */
2582         tg3_writephy(tp, 0x10, 0x8011);
2583 }
2584
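/* Drive the SG_DIG hardware autoneg block on SerDes parts.  Handles
 * forced-mode setup, autoneg restart (with a MAC_SERDES_CFG
 * workaround on everything except 5704 A0/A1), pause resolution from
 * the SG_DIG status bits, and a fallback to parallel detection when
 * the partner sends no config words.  Returns nonzero if the link is
 * up.
 */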
2585 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2586 {
2587         u32 sg_dig_ctrl, sg_dig_status;
2588         u32 serdes_cfg, expected_sg_dig_ctrl;
2589         int workaround, port_a;
2590         int current_link_up;
2591
2592         serdes_cfg = 0;
2593         expected_sg_dig_ctrl = 0;
2594         workaround = 0;
2595         port_a = 1;
2596         current_link_up = 0;
2597
2598         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2599             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2600                 workaround = 1;
2601                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2602                         port_a = 0;
2603
2604                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2605                 /* preserve bits 20-23 for voltage regulator */
2606                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2607         }
2608
2609         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2610
2611         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2612                 if (sg_dig_ctrl & (1 << 31)) {
2613                         if (workaround) {
2614                                 u32 val = serdes_cfg;
2615
2616                                 if (port_a)
2617                                         val |= 0xc010000;
2618                                 else
2619                                         val |= 0x4010000;
2620                                 tw32_f(MAC_SERDES_CFG, val);
2621                         }
2622                         tw32_f(SG_DIG_CTRL, 0x01388400);
2623                 }
2624                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2625                         tg3_setup_flow_control(tp, 0, 0);
2626                         current_link_up = 1;
2627                 }
2628                 goto out;
2629         }
2630
2631         /* Want auto-negotiation.  */
2632         expected_sg_dig_ctrl = 0x81388400;
2633
2634         /* Pause capability */
2635         expected_sg_dig_ctrl |= (1 << 11);
2636
2637         /* Asymmetric pause */
2638         expected_sg_dig_ctrl |= (1 << 12);
2639
2640         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2641                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2642                     tp->serdes_counter &&
2643                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2644                                     MAC_STATUS_RCVD_CFG)) ==
2645                      MAC_STATUS_PCS_SYNCED)) {
2646                         tp->serdes_counter--;
2647                         current_link_up = 1;
2648                         goto out;
2649                 }
2650 restart_autoneg:
2651                 if (workaround)
2652                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2653                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2654                 udelay(5);
2655                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2656
2657                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2658                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2659         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2660                                  MAC_STATUS_SIGNAL_DET)) {
2661                 sg_dig_status = tr32(SG_DIG_STATUS);
2662                 mac_status = tr32(MAC_STATUS);
2663
2664                 if ((sg_dig_status & (1 << 1)) &&
2665                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2666                         u32 local_adv, remote_adv;
2667
2668                         local_adv = ADVERTISE_PAUSE_CAP;
2669                         remote_adv = 0;
2670                         if (sg_dig_status & (1 << 19))
2671                                 remote_adv |= LPA_PAUSE_CAP;
2672                         if (sg_dig_status & (1 << 20))
2673                                 remote_adv |= LPA_PAUSE_ASYM;
2674
2675                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2676                         current_link_up = 1;
2677                         tp->serdes_counter = 0;
2678                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2679                 } else if (!(sg_dig_status & (1 << 1))) {
2680                         if (tp->serdes_counter)
2681                                 tp->serdes_counter--;
2682                         else {
2683                                 if (workaround) {
2684                                         u32 val = serdes_cfg;
2685
2686                                         if (port_a)
2687                                                 val |= 0xc010000;
2688                                         else
2689                                                 val |= 0x4010000;
2690
2691                                         tw32_f(MAC_SERDES_CFG, val);
2692                                 }
2693
2694                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2695                                 udelay(40);
2696
2697                                 /* Link parallel detection - link is up
2698                                  * only if we have PCS_SYNC and are not
2699                                  * receiving config code words. */
2700                                 mac_status = tr32(MAC_STATUS);
2701                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2702                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2703                                         tg3_setup_flow_control(tp, 0, 0);
2704                                         current_link_up = 1;
2705                                         tp->tg3_flags2 |=
2706                                                 TG3_FLG2_PARALLEL_DETECT;
2707                                         tp->serdes_counter =
2708                                                 SERDES_PARALLEL_DET_TIMEOUT;
2709                                 } else
2710                                         goto restart_autoneg;
2711                         }
2712                 }
2713         } else {
2714                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2715                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2716         }
2717
2718 out:
2719         return current_link_up;
2720 }
2721
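/* Fiber link setup for parts without the SG_DIG block: either run the
 * software autoneg state machine and resolve pause from the
 * MR_LP_ADV_* flags, or simply force a 1000FD link when autoneg is
 * disabled.  Returns nonzero if the link is up.
 */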
2722 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2723 {
2724         int current_link_up = 0;
2725
2726         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2727                 goto out;
2728
2729         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2730                 u32 flags;
2731                 int i;
2732
2733                 if (fiber_autoneg(tp, &flags)) {
2734                         u32 local_adv, remote_adv;
2735
2736                         local_adv = ADVERTISE_PAUSE_CAP;
2737                         remote_adv = 0;
2738                         if (flags & MR_LP_ADV_SYM_PAUSE)
2739                                 remote_adv |= LPA_PAUSE_CAP;
2740                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2741                                 remote_adv |= LPA_PAUSE_ASYM;
2742
2743                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2744
2745                         current_link_up = 1;
2746                 }
2747                 for (i = 0; i < 30; i++) {
2748                         udelay(20);
2749                         tw32_f(MAC_STATUS,
2750                                (MAC_STATUS_SYNC_CHANGED |
2751                                 MAC_STATUS_CFG_CHANGED));
2752                         udelay(40);
2753                         if ((tr32(MAC_STATUS) &
2754                              (MAC_STATUS_SYNC_CHANGED |
2755                               MAC_STATUS_CFG_CHANGED)) == 0)
2756                                 break;
2757                 }
2758
2759                 mac_status = tr32(MAC_STATUS);
2760                 if (current_link_up == 0 &&
2761                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2762                     !(mac_status & MAC_STATUS_RCVD_CFG))
2763                         current_link_up = 1;
2764         } else {
2765                 /* Forcing 1000FD link up. */
2766                 current_link_up = 1;
2767
2768                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2769                 udelay(40);
2770
2771                 tw32_f(MAC_MODE, tp->mac_mode);
2772                 udelay(40);
2773         }
2774
2775 out:
2776         return current_link_up;
2777 }
2778
2779 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2780 {
2781         u32 orig_pause_cfg;
2782         u16 orig_active_speed;
2783         u8 orig_active_duplex;
2784         u32 mac_status;
2785         int current_link_up;
2786         int i;
2787
2788         orig_pause_cfg =
2789                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2790                                   TG3_FLAG_TX_PAUSE));
2791         orig_active_speed = tp->link_config.active_speed;
2792         orig_active_duplex = tp->link_config.active_duplex;
2793
2794         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2795             netif_carrier_ok(tp->dev) &&
2796             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2797                 mac_status = tr32(MAC_STATUS);
2798                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2799                                MAC_STATUS_SIGNAL_DET |
2800                                MAC_STATUS_CFG_CHANGED |
2801                                MAC_STATUS_RCVD_CFG);
2802                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2803                                    MAC_STATUS_SIGNAL_DET)) {
2804                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2805                                             MAC_STATUS_CFG_CHANGED));
2806                         return 0;
2807                 }
2808         }
2809
2810         tw32_f(MAC_TX_AUTO_NEG, 0);
2811
2812         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2813         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2814         tw32_f(MAC_MODE, tp->mac_mode);
2815         udelay(40);
2816
2817         if (tp->phy_id == PHY_ID_BCM8002)
2818                 tg3_init_bcm8002(tp);
2819
2820         /* Enable link change events even when polling the serdes.  */
2821         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2822         udelay(40);
2823
2824         current_link_up = 0;
2825         mac_status = tr32(MAC_STATUS);
2826
2827         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2828                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2829         else
2830                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2831
2832         tp->hw_status->status =
2833                 (SD_STATUS_UPDATED |
2834                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2835
2836         for (i = 0; i < 100; i++) {
2837                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2838                                     MAC_STATUS_CFG_CHANGED));
2839                 udelay(5);
2840                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2841                                          MAC_STATUS_CFG_CHANGED |
2842                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2843                         break;
2844         }
2845
2846         mac_status = tr32(MAC_STATUS);
2847         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2848                 current_link_up = 0;
2849                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2850                     tp->serdes_counter == 0) {
2851                         tw32_f(MAC_MODE, (tp->mac_mode |
2852                                           MAC_MODE_SEND_CONFIGS));
2853                         udelay(1);
2854                         tw32_f(MAC_MODE, tp->mac_mode);
2855                 }
2856         }
2857
2858         if (current_link_up == 1) {
2859                 tp->link_config.active_speed = SPEED_1000;
2860                 tp->link_config.active_duplex = DUPLEX_FULL;
2861                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2862                                     LED_CTRL_LNKLED_OVERRIDE |
2863                                     LED_CTRL_1000MBPS_ON));
2864         } else {
2865                 tp->link_config.active_speed = SPEED_INVALID;
2866                 tp->link_config.active_duplex = DUPLEX_INVALID;
2867                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2868                                     LED_CTRL_LNKLED_OVERRIDE |
2869                                     LED_CTRL_TRAFFIC_OVERRIDE));
2870         }
2871
2872         if (current_link_up != netif_carrier_ok(tp->dev)) {
2873                 if (current_link_up)
2874                         netif_carrier_on(tp->dev);
2875                 else
2876                         netif_carrier_off(tp->dev);
2877                 tg3_link_report(tp);
2878         } else {
2879                 u32 now_pause_cfg =
2880                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2881                                          TG3_FLAG_TX_PAUSE);
2882                 if (orig_pause_cfg != now_pause_cfg ||
2883                     orig_active_speed != tp->link_config.active_speed ||
2884                     orig_active_duplex != tp->link_config.active_duplex)
2885                         tg3_link_report(tp);
2886         }
2887
2888         return 0;
2889 }
2890
2891 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2892 {
2893         int current_link_up, err = 0;
2894         u32 bmsr, bmcr;
2895         u16 current_speed;
2896         u8 current_duplex;
2897
2898         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2899         tw32_f(MAC_MODE, tp->mac_mode);
2900         udelay(40);
2901
2902         tw32(MAC_EVENT, 0);
2903
2904         tw32_f(MAC_STATUS,
2905              (MAC_STATUS_SYNC_CHANGED |
2906               MAC_STATUS_CFG_CHANGED |
2907               MAC_STATUS_MI_COMPLETION |
2908               MAC_STATUS_LNKSTATE_CHANGED));
2909         udelay(40);
2910
2911         if (force_reset)
2912                 tg3_phy_reset(tp);
2913
2914         current_link_up = 0;
2915         current_speed = SPEED_INVALID;
2916         current_duplex = DUPLEX_INVALID;
2917
2918         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2919         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2920         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2921                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2922                         bmsr |= BMSR_LSTATUS;
2923                 else
2924                         bmsr &= ~BMSR_LSTATUS;
2925         }
2926
2927         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2928
2929         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2930             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2931                 /* do nothing, just check for link up at the end */
2932         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2933                 u32 adv, new_adv;
2934
2935                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2936                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2937                                   ADVERTISE_1000XPAUSE |
2938                                   ADVERTISE_1000XPSE_ASYM |
2939                                   ADVERTISE_SLCT);
2940
2941                 /* Always advertise symmetric PAUSE just like copper */
2942                 new_adv |= ADVERTISE_1000XPAUSE;
2943
2944                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2945                         new_adv |= ADVERTISE_1000XHALF;
2946                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2947                         new_adv |= ADVERTISE_1000XFULL;
2948
2949                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2950                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2951                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2952                         tg3_writephy(tp, MII_BMCR, bmcr);
2953
2954                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2955                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2956                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2957
2958                         return err;
2959                 }
2960         } else {
2961                 u32 new_bmcr;
2962
2963                 bmcr &= ~BMCR_SPEED1000;
2964                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2965
2966                 if (tp->link_config.duplex == DUPLEX_FULL)
2967                         new_bmcr |= BMCR_FULLDPLX;
2968
2969                 if (new_bmcr != bmcr) {
2970                         /* BMCR_SPEED1000 is a reserved bit that needs
2971                          * to be set on write.
2972                          */
2973                         new_bmcr |= BMCR_SPEED1000;
2974
2975                         /* Force a linkdown */
2976                         if (netif_carrier_ok(tp->dev)) {
2977                                 u32 adv;
2978
2979                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2980                                 adv &= ~(ADVERTISE_1000XFULL |
2981                                          ADVERTISE_1000XHALF |
2982                                          ADVERTISE_SLCT);
2983                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2984                                 tg3_writephy(tp, MII_BMCR, bmcr |
2985                                                            BMCR_ANRESTART |
2986                                                            BMCR_ANENABLE);
2987                                 udelay(10);
2988                                 netif_carrier_off(tp->dev);
2989                         }
2990                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2991                         bmcr = new_bmcr;
2992                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2993                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2994                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2995                             ASIC_REV_5714) {
2996                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2997                                         bmsr |= BMSR_LSTATUS;
2998                                 else
2999                                         bmsr &= ~BMSR_LSTATUS;
3000                         }
3001                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3002                 }
3003         }
3004
3005         if (bmsr & BMSR_LSTATUS) {
3006                 current_speed = SPEED_1000;
3007                 current_link_up = 1;
3008                 if (bmcr & BMCR_FULLDPLX)
3009                         current_duplex = DUPLEX_FULL;
3010                 else
3011                         current_duplex = DUPLEX_HALF;
3012
3013                 if (bmcr & BMCR_ANENABLE) {
3014                         u32 local_adv, remote_adv, common;
3015
3016                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3017                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3018                         common = local_adv & remote_adv;
3019                         if (common & (ADVERTISE_1000XHALF |
3020                                       ADVERTISE_1000XFULL)) {
3021                                 if (common & ADVERTISE_1000XFULL)
3022                                         current_duplex = DUPLEX_FULL;
3023                                 else
3024                                         current_duplex = DUPLEX_HALF;
3025
3026                                 tg3_setup_flow_control(tp, local_adv,
3027                                                        remote_adv);
3028                         } else {
3029                                 current_link_up = 0;
3030                         }
3031                 }
3032         }
3033
3034         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3035         if (tp->link_config.active_duplex == DUPLEX_HALF)
3036                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3037
3038         tw32_f(MAC_MODE, tp->mac_mode);
3039         udelay(40);
3040
3041         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3042
3043         tp->link_config.active_speed = current_speed;
3044         tp->link_config.active_duplex = current_duplex;
3045
3046         if (current_link_up != netif_carrier_ok(tp->dev)) {
3047                 if (current_link_up)
3048                         netif_carrier_on(tp->dev);
3049                 else {
3050                         netif_carrier_off(tp->dev);
3051                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3052                 }
3053                 tg3_link_report(tp);
3054         }
3055         return err;
3056 }
3057
3058 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3059 {
3060         if (tp->serdes_counter) {
3061                 /* Give autoneg time to complete. */
3062                 tp->serdes_counter--;
3063                 return;
3064         }
3065         if (!netif_carrier_ok(tp->dev) &&
3066             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3067                 u32 bmcr;
3068
3069                 tg3_readphy(tp, MII_BMCR, &bmcr);
3070                 if (bmcr & BMCR_ANENABLE) {
3071                         u32 phy1, phy2;
3072
3073                         /* Select shadow register 0x1f */
3074                         tg3_writephy(tp, 0x1c, 0x7c00);
3075                         tg3_readphy(tp, 0x1c, &phy1);
3076
3077                         /* Select expansion interrupt status register */
3078                         tg3_writephy(tp, 0x17, 0x0f01);
3079                         tg3_readphy(tp, 0x15, &phy2);
3080                         tg3_readphy(tp, 0x15, &phy2);
3081
3082                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3083                                 /* We have signal detect and are not receiving
3084                                  * config code words, so the link is up by
3085                                  * parallel detection.
3086                                  */
3087
3088                                 bmcr &= ~BMCR_ANENABLE;
3089                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3090                                 tg3_writephy(tp, MII_BMCR, bmcr);
3091                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3092                         }
3093                 }
3094         }
3095         else if (netif_carrier_ok(tp->dev) &&
3096                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3097                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3098                 u32 phy2;
3099
3100                 /* Select expansion interrupt status register */
3101                 tg3_writephy(tp, 0x17, 0x0f01);
3102                 tg3_readphy(tp, 0x15, &phy2);
3103                 if (phy2 & 0x20) {
3104                         u32 bmcr;
3105
3106                         /* Config code words received, turn on autoneg. */
3107                         tg3_readphy(tp, MII_BMCR, &bmcr);
3108                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3109
3110                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3111
3112                 }
3113         }
3114 }
3115
3116 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3117 {
3118         int err;
3119
3120         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3121                 err = tg3_setup_fiber_phy(tp, force_reset);
3122         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3123                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3124         } else {
3125                 err = tg3_setup_copper_phy(tp, force_reset);
3126         }
3127
3128         if (tp->link_config.active_speed == SPEED_1000 &&
3129             tp->link_config.active_duplex == DUPLEX_HALF)
3130                 tw32(MAC_TX_LENGTHS,
3131                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3132                       (6 << TX_LENGTHS_IPG_SHIFT) |
3133                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3134         else
3135                 tw32(MAC_TX_LENGTHS,
3136                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3137                       (6 << TX_LENGTHS_IPG_SHIFT) |
3138                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3139
3140         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3141                 if (netif_carrier_ok(tp->dev)) {
3142                         tw32(HOSTCC_STAT_COAL_TICKS,
3143                              tp->coal.stats_block_coalesce_usecs);
3144                 } else {
3145                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3146                 }
3147         }
3148
3149         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3150                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3151                 if (!netif_carrier_ok(tp->dev))
3152                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3153                               tp->pwrmgmt_thresh;
3154                 else
3155                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3156                 tw32(PCIE_PWR_MGMT_THRESH, val);
3157         }
3158
3159         return err;
3160 }
3161
3162 /* This is called whenever we suspect that the system chipset is re-
3163  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3164  * is bogus tx completions. We try to recover by setting the
3165  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3166  * in the workqueue.
3167  */
3168 static void tg3_tx_recover(struct tg3 *tp)
3169 {
3170         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3171                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3172
3173         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3174                "mapped I/O cycles to the network device, attempting to "
3175                "recover. Please report the problem to the driver maintainer "
3176                "and include system chipset information.\n", tp->dev->name);
3177
3178         spin_lock(&tp->lock);
3179         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3180         spin_unlock(&tp->lock);
3181 }
3182
3183 static inline u32 tg3_tx_avail(struct tg3 *tp)
3184 {
3185         smp_mb();
3186         return (tp->tx_pending -
3187                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3188 }
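/* Worked example of the ring arithmetic above (illustrative values): with
 * TG3_TX_RING_SIZE = 512, tx_prod = 5 and tx_cons = 510 there are
 * (5 - 510) & 511 = 7 descriptors still in flight, so with tx_pending = 511
 * this returns 511 - 7 = 504 free descriptors.
 */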
3189
3190 /* Tigon3 never reports partial packet sends.  So we do not
3191  * need special logic to handle SKBs that have not had all
3192  * of their frags sent yet, like SunGEM does.
3193  */
3194 static void tg3_tx(struct tg3 *tp)
3195 {
3196         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3197         u32 sw_idx = tp->tx_cons;
3198
3199         while (sw_idx != hw_idx) {
3200                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3201                 struct sk_buff *skb = ri->skb;
3202                 int i, tx_bug = 0;
3203
3204                 if (unlikely(skb == NULL)) {
3205                         tg3_tx_recover(tp);
3206                         return;
3207                 }
3208
3209                 pci_unmap_single(tp->pdev,
3210                                  pci_unmap_addr(ri, mapping),
3211                                  skb_headlen(skb),
3212                                  PCI_DMA_TODEVICE);
3213
3214                 ri->skb = NULL;
3215
3216                 sw_idx = NEXT_TX(sw_idx);
3217
3218                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3219                         ri = &tp->tx_buffers[sw_idx];
3220                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3221                                 tx_bug = 1;
3222
3223                         pci_unmap_page(tp->pdev,
3224                                        pci_unmap_addr(ri, mapping),
3225                                        skb_shinfo(skb)->frags[i].size,
3226                                        PCI_DMA_TODEVICE);
3227
3228                         sw_idx = NEXT_TX(sw_idx);
3229                 }
3230
3231                 dev_kfree_skb(skb);
3232
3233                 if (unlikely(tx_bug)) {
3234                         tg3_tx_recover(tp);
3235                         return;
3236                 }
3237         }
3238
3239         tp->tx_cons = sw_idx;
3240
3241         /* Need to make the tx_cons update visible to tg3_start_xmit()
3242          * before checking for netif_queue_stopped().  Without the
3243          * memory barrier, there is a small possibility that tg3_start_xmit()
3244          * will miss it and cause the queue to be stopped forever.
3245          */
3246         smp_mb();
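        /* (Editorial note) This barrier pairs with the smp_mb() at the top of
         * tg3_tx_avail(): tg3_start_xmit() stops the queue and then re-checks
         * tg3_tx_avail(), so either it observes the new tx_cons or we observe
         * the stopped queue here and issue the wakeup.
         */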
3247
3248         if (unlikely(netif_queue_stopped(tp->dev) &&
3249                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3250                 netif_tx_lock(tp->dev);
3251                 if (netif_queue_stopped(tp->dev) &&
3252                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3253                         netif_wake_queue(tp->dev);
3254                 netif_tx_unlock(tp->dev);
3255         }
3256 }
3257
3258 /* Returns size of skb allocated or < 0 on error.
3259  *
3260  * We only need to fill in the address because the other members
3261  * of the RX descriptor are invariant, see tg3_init_rings.
3262  *
3263  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3264  * posting buffers we only dirty the first cache line of the RX
3265  * descriptor (containing the address).  Whereas for the RX status
3266  * buffers the cpu only reads the last cacheline of the RX descriptor
3267  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3268  */
3269 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3270                             int src_idx, u32 dest_idx_unmasked)
3271 {
3272         struct tg3_rx_buffer_desc *desc;
3273         struct ring_info *map, *src_map;
3274         struct sk_buff *skb;
3275         dma_addr_t mapping;
3276         int skb_size, dest_idx;
3277
3278         src_map = NULL;
3279         switch (opaque_key) {
3280         case RXD_OPAQUE_RING_STD:
3281                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3282                 desc = &tp->rx_std[dest_idx];
3283                 map = &tp->rx_std_buffers[dest_idx];
3284                 if (src_idx >= 0)
3285                         src_map = &tp->rx_std_buffers[src_idx];
3286                 skb_size = tp->rx_pkt_buf_sz;
3287                 break;
3288
3289         case RXD_OPAQUE_RING_JUMBO:
3290                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3291                 desc = &tp->rx_jumbo[dest_idx];
3292                 map = &tp->rx_jumbo_buffers[dest_idx];
3293                 if (src_idx >= 0)
3294                         src_map = &tp->rx_jumbo_buffers[src_idx];
3295                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3296                 break;
3297
3298         default:
3299                 return -EINVAL;
3300         }
3301
3302         /* Do not overwrite any of the map or rp information
3303          * until we are sure we can commit to a new buffer.
3304          *
3305          * Callers depend upon this behavior and assume that
3306          * we leave everything unchanged if we fail.
3307          */
3308         skb = netdev_alloc_skb(tp->dev, skb_size);
3309         if (skb == NULL)
3310                 return -ENOMEM;
3311
3312         skb_reserve(skb, tp->rx_offset);
3313
3314         mapping = pci_map_single(tp->pdev, skb->data,
3315                                  skb_size - tp->rx_offset,
3316                                  PCI_DMA_FROMDEVICE);
3317
3318         map->skb = skb;
3319         pci_unmap_addr_set(map, mapping, mapping);
3320
3321         if (src_map != NULL)
3322                 src_map->skb = NULL;
3323
3324         desc->addr_hi = ((u64)mapping >> 32);
3325         desc->addr_lo = ((u64)mapping & 0xffffffff);
3326
3327         return skb_size;
3328 }
3329
3330 /* We only need to copy the address over because the other
3331  * members of the RX descriptor are invariant.  See notes above
3332  * tg3_alloc_rx_skb for full details.
3333  */
3334 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3335                            int src_idx, u32 dest_idx_unmasked)
3336 {
3337         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3338         struct ring_info *src_map, *dest_map;
3339         int dest_idx;
3340
3341         switch (opaque_key) {
3342         case RXD_OPAQUE_RING_STD:
3343                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3344                 dest_desc = &tp->rx_std[dest_idx];
3345                 dest_map = &tp->rx_std_buffers[dest_idx];
3346                 src_desc = &tp->rx_std[src_idx];
3347                 src_map = &tp->rx_std_buffers[src_idx];
3348                 break;
3349
3350         case RXD_OPAQUE_RING_JUMBO:
3351                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3352                 dest_desc = &tp->rx_jumbo[dest_idx];
3353                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3354                 src_desc = &tp->rx_jumbo[src_idx];
3355                 src_map = &tp->rx_jumbo_buffers[src_idx];
3356                 break;
3357
3358         default:
3359                 return;
3360         }
3361
3362         dest_map->skb = src_map->skb;
3363         pci_unmap_addr_set(dest_map, mapping,
3364                            pci_unmap_addr(src_map, mapping));
3365         dest_desc->addr_hi = src_desc->addr_hi;
3366         dest_desc->addr_lo = src_desc->addr_lo;
3367
3368         src_map->skb = NULL;
3369 }
3370
3371 #if TG3_VLAN_TAG_USED
3372 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3373 {
3374         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3375 }
3376 #endif
3377
3378 /* The RX ring scheme is composed of multiple rings which post fresh
3379  * buffers to the chip, and one special ring the chip uses to report
3380  * status back to the host.
3381  *
3382  * The special ring reports the status of received packets to the
3383  * host.  The chip does not write into the original descriptor the
3384  * RX buffer was obtained from.  The chip simply takes the original
3385  * descriptor as provided by the host, updates the status and length
3386  * field, then writes this into the next status ring entry.
3387  *
3388  * Each ring the host uses to post buffers to the chip is described
3389  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3390  * it is first placed into the on-chip ram.  When the packet's length
3391  * is known, it walks down the TG3_BDINFO entries to select the ring.
3392  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3393  * whose MAXLEN covers the new packet's length is chosen.
3394  *
3395  * The "separate ring for rx status" scheme may sound queer, but it makes
3396  * sense from a cache coherency perspective.  If only the host writes
3397  * to the buffer post rings, and only the chip writes to the rx status
3398  * rings, then cache lines never move beyond shared-modified state.
3399  * If both the host and chip were to write into the same ring, cache line
3400  * eviction could occur since both entities want it in an exclusive state.
3401  */
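/* Illustrative sketch (editorial, not part of the original source): how
 * tg3_rx() below recovers the posting ring and buffer slot from the opaque
 * cookie the chip echoes back in each status ring entry:
 *
 *      u32 desc_idx   = desc->opaque & RXD_OPAQUE_INDEX_MASK;  (slot)
 *      u32 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;   (which ring)
 *
 * For RXD_OPAQUE_RING_STD the slot indexes rx_std_buffers[]; for
 * RXD_OPAQUE_RING_JUMBO it indexes rx_jumbo_buffers[].
 */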
3402 static int tg3_rx(struct tg3 *tp, int budget)
3403 {
3404         u32 work_mask, rx_std_posted = 0;
3405         u32 sw_idx = tp->rx_rcb_ptr;
3406         u16 hw_idx;
3407         int received;
3408
3409         hw_idx = tp->hw_status->idx[0].rx_producer;
3410         /*
3411          * We need to order the read of hw_idx and the read of
3412          * the opaque cookie.
3413          */
3414         rmb();
3415         work_mask = 0;
3416         received = 0;
3417         while (sw_idx != hw_idx && budget > 0) {
3418                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3419                 unsigned int len;
3420                 struct sk_buff *skb;
3421                 dma_addr_t dma_addr;
3422                 u32 opaque_key, desc_idx, *post_ptr;
3423
3424                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3425                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3426                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3427                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3428                                                   mapping);
3429                         skb = tp->rx_std_buffers[desc_idx].skb;
3430                         post_ptr = &tp->rx_std_ptr;
3431                         rx_std_posted++;
3432                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3433                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3434                                                   mapping);
3435                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3436                         post_ptr = &tp->rx_jumbo_ptr;
3437                 }
3438                 else {
3439                         goto next_pkt_nopost;
3440                 }
3441
3442                 work_mask |= opaque_key;
3443
3444                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3445                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3446                 drop_it:
3447                         tg3_recycle_rx(tp, opaque_key,
3448                                        desc_idx, *post_ptr);
3449                 drop_it_no_recycle:
3450                         /* Other statistics kept track of by card. */
3451                         tp->net_stats.rx_dropped++;
3452                         goto next_pkt;
3453                 }
3454
3455                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3456
3457                 if (len > RX_COPY_THRESHOLD
3458                         && tp->rx_offset == 2
3459                         /* rx_offset != 2 iff this is a 5701 card running
3460                          * in PCI-X mode [see tg3_get_invariants()] */
3461                 ) {
3462                         int skb_size;
3463
3464                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3465                                                     desc_idx, *post_ptr);
3466                         if (skb_size < 0)
3467                                 goto drop_it;
3468
3469                         pci_unmap_single(tp->pdev, dma_addr,
3470                                          skb_size - tp->rx_offset,
3471                                          PCI_DMA_FROMDEVICE);
3472
3473                         skb_put(skb, len);
3474                 } else {
3475                         struct sk_buff *copy_skb;
3476
3477                         tg3_recycle_rx(tp, opaque_key,
3478                                        desc_idx, *post_ptr);
3479
3480                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3481                         if (copy_skb == NULL)
3482                                 goto drop_it_no_recycle;
3483
3484                         skb_reserve(copy_skb, 2);
3485                         skb_put(copy_skb, len);
3486                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3487                         skb_copy_from_linear_data(skb, copy_skb->data, len);
3488                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3489
3490                         /* We'll reuse the original ring buffer. */
3491                         skb = copy_skb;
3492                 }
3493
3494                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3495                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3496                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3497                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3498                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3499                 else
3500                         skb->ip_summed = CHECKSUM_NONE;
3501
3502                 skb->protocol = eth_type_trans(skb, tp->dev);
3503 #if TG3_VLAN_TAG_USED
3504                 if (tp->vlgrp != NULL &&
3505                     desc->type_flags & RXD_FLAG_VLAN) {
3506                         tg3_vlan_rx(tp, skb,
3507                                     desc->err_vlan & RXD_VLAN_MASK);
3508                 } else
3509 #endif
3510                         netif_receive_skb(skb);
3511
3512                 tp->dev->last_rx = jiffies;
3513                 received++;
3514                 budget--;
3515
3516 next_pkt:
3517                 (*post_ptr)++;
3518
3519                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3520                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3521
3522                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3523                                      TG3_64BIT_REG_LOW, idx);
3524                         work_mask &= ~RXD_OPAQUE_RING_STD;
3525                         rx_std_posted = 0;
3526                 }
3527 next_pkt_nopost:
3528                 sw_idx++;
3529                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3530
3531                 /* Refresh hw_idx to see if there is new work */
3532                 if (sw_idx == hw_idx) {
3533                         hw_idx = tp->hw_status->idx[0].rx_producer;
3534                         rmb();
3535                 }
3536         }
3537
3538         /* ACK the status ring. */
3539         tp->rx_rcb_ptr = sw_idx;
3540         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3541
3542         /* Refill RX ring(s). */
3543         if (work_mask & RXD_OPAQUE_RING_STD) {
3544                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3545                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3546                              sw_idx);
3547         }
3548         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3549                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3550                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3551                              sw_idx);
3552         }
3553         mmiowb();
3554
3555         return received;
3556 }
3557
3558 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3559 {
3560         struct tg3_hw_status *sblk = tp->hw_status;
3561
3562         /* handle link change and other phy events */
3563         if (!(tp->tg3_flags &
3564               (TG3_FLAG_USE_LINKCHG_REG |
3565                TG3_FLAG_POLL_SERDES))) {
3566                 if (sblk->status & SD_STATUS_LINK_CHG) {
3567                         sblk->status = SD_STATUS_UPDATED |
3568                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3569                         spin_lock(&tp->lock);
3570                         tg3_setup_phy(tp, 0);
3571                         spin_unlock(&tp->lock);
3572                 }
3573         }
3574
3575         /* run TX completion thread */
3576         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3577                 tg3_tx(tp);
3578                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3579                         return 0;
3580         }
3581
3582         /* run RX thread, within the bounds set by NAPI.
3583          * All RX "locking" is done by ensuring outside
3584          * code synchronizes with tg3->napi.poll()
3585          */
3586         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3587                 work_done += tg3_rx(tp, budget - work_done);
3588
3589         return work_done;
3590 }
3591
3592 static int tg3_poll(struct napi_struct *napi, int budget)
3593 {
3594         struct tg3 *tp = container_of(napi, struct tg3, napi);
3595         int work_done = 0;
3596
3597         while (1) {
3598                 work_done = tg3_poll_work(tp, work_done, budget);
3599
3600                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3601                         goto tx_recovery;
3602
3603                 if (unlikely(work_done >= budget))
3604                         break;
3605
3606                 if (likely(!tg3_has_work(tp))) {
3607                         struct tg3_hw_status *sblk = tp->hw_status;
3608
3609                         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3610                                 tp->last_tag = sblk->status_tag;
3611                                 rmb();
3612                         } else
3613                                 sblk->status &= ~SD_STATUS_UPDATED;
3614
3615                         netif_rx_complete(tp->dev, napi);
3616                         tg3_restart_ints(tp);
3617                         break;
3618                 }
3619         }
3620
3621         return work_done;
3622
3623 tx_recovery:
3624         netif_rx_complete(tp->dev, napi);
3625         schedule_work(&tp->reset_task);
3626         return 0;
3627 }
3628
3629 static void tg3_irq_quiesce(struct tg3 *tp)
3630 {
3631         BUG_ON(tp->irq_sync);
3632
3633         tp->irq_sync = 1;
3634         smp_mb();
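        /* (Editorial note) The barrier publishes the irq_sync update before we
         * wait, so interrupt handlers that start from here on see
         * tg3_irq_sync() set and bail out, while synchronize_irq() waits for
         * any handler that is already running.
         */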
3635
3636         synchronize_irq(tp->pdev->irq);
3637 }
3638
3639 static inline int tg3_irq_sync(struct tg3 *tp)
3640 {
3641         return tp->irq_sync;
3642 }
3643
3644 /* Fully shut down all tg3 driver activity elsewhere in the system.
3645  * If irq_sync is non-zero, the IRQ handler must be synchronized with
3646  * as well.  Most of the time this is not necessary, except when
3647  * shutting down the device.
3648  */
3649 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3650 {
3651         spin_lock_bh(&tp->lock);
3652         if (irq_sync)
3653                 tg3_irq_quiesce(tp);
3654 }
3655
3656 static inline void tg3_full_unlock(struct tg3 *tp)
3657 {
3658         spin_unlock_bh(&tp->lock);
3659 }
3660
3661 /* One-shot MSI handler - the chip automatically disables the interrupt
3662  * after sending the MSI, so the driver doesn't have to.
3663  */
3664 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3665 {
3666         struct net_device *dev = dev_id;
3667         struct tg3 *tp = netdev_priv(dev);
3668
3669         prefetch(tp->hw_status);
3670         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3671
3672         if (likely(!tg3_irq_sync(tp)))
3673                 netif_rx_schedule(dev, &tp->napi);
3674
3675         return IRQ_HANDLED;
3676 }
3677
3678 /* MSI ISR - No need to check for interrupt sharing and no need to
3679  * flush status block and interrupt mailbox. PCI ordering rules
3680  * guarantee that MSI will arrive after the status block.
3681  */
3682 static irqreturn_t tg3_msi(int irq, void *dev_id)
3683 {
3684         struct net_device *dev = dev_id;
3685         struct tg3 *tp = netdev_priv(dev);
3686
3687         prefetch(tp->hw_status);
3688         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3689         /*
3690          * Writing any value to intr-mbox-0 clears PCI INTA# and
3691          * chip-internal interrupt pending events.
3692          * Writing non-zero to intr-mbox-0 additionally tells the
3693          * NIC to stop sending us irqs, engaging "in-intr-handler"
3694          * event coalescing.
3695          */
3696         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3697         if (likely(!tg3_irq_sync(tp)))
3698                 netif_rx_schedule(dev, &tp->napi);
3699
3700         return IRQ_RETVAL(1);
3701 }
3702
3703 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3704 {
3705         struct net_device *dev = dev_id;
3706         struct tg3 *tp = netdev_priv(dev);
3707         struct tg3_hw_status *sblk = tp->hw_status;
3708         unsigned int handled = 1;
3709
3710         /* In INTx mode, it is possible for the interrupt to arrive at the
3711          * CPU before the status block write posted prior to it is visible.
3712          * Reading the PCI State register will confirm whether the
3713          * interrupt is ours and will flush the status block.
3714          */
3715         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3716                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3717                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3718                         handled = 0;
3719                         goto out;
3720                 }
3721         }
3722
3723         /*
3724          * Writing any value to intr-mbox-0 clears PCI INTA# and
3725          * chip-internal interrupt pending events.
3726          * Writing non-zero to intr-mbox-0 additionally tells the
3727          * NIC to stop sending us irqs, engaging "in-intr-handler"
3728          * event coalescing.
3729          *
3730          * Flush the mailbox to de-assert the IRQ immediately to prevent
3731          * spurious interrupts.  The flush impacts performance but
3732          * excessive spurious interrupts can be worse in some cases.
3733          */
3734         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3735         if (tg3_irq_sync(tp))
3736                 goto out;
3737         sblk->status &= ~SD_STATUS_UPDATED;
3738         if (likely(tg3_has_work(tp))) {
3739                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3740                 netif_rx_schedule(dev, &tp->napi);
3741         } else {
3742                 /* No work, shared interrupt perhaps?  re-enable
3743                  * interrupts, and flush that PCI write
3744                  */
3745                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3746                                0x00000000);
3747         }
3748 out:
3749         return IRQ_RETVAL(handled);
3750 }
3751
3752 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
3753 {
3754         struct net_device *dev = dev_id;
3755         struct tg3 *tp = netdev_priv(dev);
3756         struct tg3_hw_status *sblk = tp->hw_status;
3757         unsigned int handled = 1;
3758
3759         /* In INTx mode, it is possible for the interrupt to arrive at the
3760          * CPU before the status block write posted prior to it is visible.
3761          * Reading the PCI State register will confirm whether the
3762          * interrupt is ours and will flush the status block.
3763          */
3764         if (unlikely(sblk->status_tag == tp->last_tag)) {
3765                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3766                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3767                         handled = 0;
3768                         goto out;
3769                 }
3770         }
3771
3772         /*
3773          * Writing any value to intr-mbox-0 clears PCI INTA# and
3774          * chip-internal interrupt pending events.
3775          * Writing non-zero to intr-mbox-0 additionally tells the
3776          * NIC to stop sending us irqs, engaging "in-intr-handler"
3777          * event coalescing.
3778          *
3779          * Flush the mailbox to de-assert the IRQ immediately to prevent
3780          * spurious interrupts.  The flush impacts performance but
3781          * excessive spurious interrupts can be worse in some cases.
3782          */
3783         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3784         if (tg3_irq_sync(tp))
3785                 goto out;
3786         if (netif_rx_schedule_prep(dev, &tp->napi)) {
3787                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3788                 /* Update last_tag to mark that this status has been
3789                  * seen. Because interrupt may be shared, we may be
3790                  * racing with tg3_poll(), so only update last_tag
3791                  * if tg3_poll() is not scheduled.
3792                  */
3793                 tp->last_tag = sblk->status_tag;
3794                 __netif_rx_schedule(dev, &tp->napi);
3795         }
3796 out:
3797         return IRQ_RETVAL(handled);
3798 }
3799
3800 /* ISR for interrupt test */
3801 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3802 {
3803         struct net_device *dev = dev_id;
3804         struct tg3 *tp = netdev_priv(dev);
3805         struct tg3_hw_status *sblk = tp->hw_status;
3806
3807         if ((sblk->status & SD_STATUS_UPDATED) ||
3808             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3809                 tg3_disable_ints(tp);
3810                 return IRQ_RETVAL(1);
3811         }
3812         return IRQ_RETVAL(0);
3813 }
3814
3815 static int tg3_init_hw(struct tg3 *, int);
3816 static int tg3_halt(struct tg3 *, int, int);
3817
3818 /* Restart hardware after configuration changes, self-test, etc.
3819  * Invoked with tp->lock held.
3820  */
3821 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3822 {
3823         int err;
3824
3825         err = tg3_init_hw(tp, reset_phy);
3826         if (err) {
3827                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3828                        "aborting.\n", tp->dev->name);
3829                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3830                 tg3_full_unlock(tp);
3831                 del_timer_sync(&tp->timer);
3832                 tp->irq_sync = 0;
3833                 napi_enable(&tp->napi);
3834                 dev_close(tp->dev);
3835                 tg3_full_lock(tp, 0);
3836         }
3837         return err;
3838 }
3839
3840 #ifdef CONFIG_NET_POLL_CONTROLLER
3841 static void tg3_poll_controller(struct net_device *dev)
3842 {
3843         struct tg3 *tp = netdev_priv(dev);
3844
3845         tg3_interrupt(tp->pdev->irq, dev);
3846 }
3847 #endif
3848
3849 static void tg3_reset_task(struct work_struct *work)
3850 {
3851         struct tg3 *tp = container_of(work, struct tg3, reset_task);
3852         unsigned int restart_timer;
3853
3854         tg3_full_lock(tp, 0);
3855
3856         if (!netif_running(tp->dev)) {
3857                 tg3_full_unlock(tp);
3858                 return;
3859         }
3860
3861         tg3_full_unlock(tp);
3862
3863         tg3_netif_stop(tp);
3864
3865         tg3_full_lock(tp, 1);
3866
3867         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3868         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3869
3870         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3871                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3872                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3873                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3874                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3875         }
3876
3877         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3878         if (tg3_init_hw(tp, 1))
3879                 goto out;
3880
3881         tg3_netif_start(tp);
3882
3883         if (restart_timer)
3884                 mod_timer(&tp->timer, jiffies + 1);
3885
3886 out:
3887         tg3_full_unlock(tp);
3888 }
3889
3890 static void tg3_dump_short_state(struct tg3 *tp)
3891 {
3892         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3893                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3894         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3895                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3896 }
3897
3898 static void tg3_tx_timeout(struct net_device *dev)
3899 {
3900         struct tg3 *tp = netdev_priv(dev);
3901
3902         if (netif_msg_tx_err(tp)) {
3903                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3904                        dev->name);
3905                 tg3_dump_short_state(tp);
3906         }
3907
3908         schedule_work(&tp->reset_task);
3909 }
3910
3911 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3912 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3913 {
3914         u32 base = (u32) mapping & 0xffffffff;
3915
3916         return ((base > 0xffffdcc0) &&
3917                 (base + len + 8 < base));
3918 }
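/* Worked example (illustrative): base = 0xffffff00 and len = 1500 give
 * base + len + 8 = 0x1000004e4, which truncates to 0x4e4 < base in 32-bit
 * arithmetic, so the buffer wraps across a 4GB boundary and the test fires.
 * The (base > 0xffffdcc0) pre-check merely excludes buffers that end more
 * than roughly 9KB (max frame plus slop) below the boundary.
 */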
3919
3920 /* Test for DMA addresses > 40-bit */
3921 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3922                                           int len)
3923 {
3924 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3925         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3926                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3927         return 0;
3928 #else
3929         return 0;
3930 #endif
3931 }
3932
3933 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3934
3935 /* Work around the 4GB and 40-bit hardware DMA bugs. */
3936 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3937                                        u32 last_plus_one, u32 *start,
3938                                        u32 base_flags, u32 mss)
3939 {
3940         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3941         dma_addr_t new_addr = 0;
3942         u32 entry = *start;
3943         int i, ret = 0;
3944
3945         if (!new_skb) {
3946                 ret = -1;
3947         } else {
3948                 /* New SKB is guaranteed to be linear. */
3949                 entry = *start;
3950                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3951                                           PCI_DMA_TODEVICE);
3952                 /* Make sure new skb does not cross any 4G boundaries.
3953                  * Drop the packet if it does.
3954                  */
3955                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3956                         ret = -1;
3957                         dev_kfree_skb(new_skb);
3958                         new_skb = NULL;
3959                 } else {
3960                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3961                                     base_flags, 1 | (mss << 1));
3962                         *start = NEXT_TX(entry);
3963                 }
3964         }
3965
3966         /* Now clean up the sw ring entries. */
3967         i = 0;
3968         while (entry != last_plus_one) {
3969                 int len;
3970
3971                 if (i == 0)
3972                         len = skb_headlen(skb);
3973                 else
3974                         len = skb_shinfo(skb)->frags[i-1].size;
3975                 pci_unmap_single(tp->pdev,
3976                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3977                                  len, PCI_DMA_TODEVICE);
3978                 if (i == 0) {
3979                         tp->tx_buffers[entry].skb = new_skb;
3980                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3981                 } else {
3982                         tp->tx_buffers[entry].skb = NULL;
3983                 }
3984                 entry = NEXT_TX(entry);
3985                 i++;
3986         }
3987
3988         dev_kfree_skb(skb);
3989
3990         return ret;
3991 }
3992
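/* (Editorial note) Callers of tg3_set_txd() pack two values into the
 * mss_and_is_end argument: bit 0 is the "last descriptor of this skb" flag
 * and the remaining bits hold the TSO MSS, i.e. they pass
 * (is_end) | (mss << 1), which is unpacked below.
 */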
3993 static void tg3_set_txd(struct tg3 *tp, int entry,
3994                         dma_addr_t mapping, int len, u32 flags,
3995                         u32 mss_and_is_end)
3996 {
3997         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3998         int is_end = (mss_and_is_end & 0x1);
3999         u32 mss = (mss_and_is_end >> 1);
4000         u32 vlan_tag = 0;
4001
4002         if (is_end)
4003                 flags |= TXD_FLAG_END;
4004         if (flags & TXD_FLAG_VLAN) {
4005                 vlan_tag = flags >> 16;
4006                 flags &= 0xffff;
4007         }
4008         vlan_tag |= (mss << TXD_MSS_SHIFT);
4009
4010         txd->addr_hi = ((u64) mapping >> 32);
4011         txd->addr_lo = ((u64) mapping & 0xffffffff);
4012         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4013         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4014 }
4015
4016 /* hard_start_xmit for devices that don't have any bugs and
4017  * support TG3_FLG2_HW_TSO_2 only.
4018  */
4019 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4020 {
4021         struct tg3 *tp = netdev_priv(dev);
4022         dma_addr_t mapping;
4023         u32 len, entry, base_flags, mss;
4024
4025         len = skb_headlen(skb);
4026
4027         /* We are running in BH disabled context with netif_tx_lock
4028          * and TX reclaim runs via tp->napi.poll inside of a software
4029          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4030          * no IRQ context deadlocks to worry about either.  Rejoice!
4031          */
4032         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4033                 if (!netif_queue_stopped(dev)) {
4034                         netif_stop_queue(dev);
4035
4036                         /* This is a hard error, log it. */
4037                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4038                                "queue awake!\n", dev->name);
4039                 }
4040                 return NETDEV_TX_BUSY;
4041         }
4042
4043         entry = tp->tx_prod;
4044         base_flags = 0;
4045         mss = 0;
4046         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4047                 int tcp_opt_len, ip_tcp_len;
4048
4049                 if (skb_header_cloned(skb) &&
4050                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4051                         dev_kfree_skb(skb);
4052                         goto out_unlock;
4053                 }
4054
4055                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4056                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4057                 else {
4058                         struct iphdr *iph = ip_hdr(skb);
4059
4060                         tcp_opt_len = tcp_optlen(skb);
4061                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4062
4063                         iph->check = 0;
4064                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4065                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4066                 }
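                /* (Editorial note) In the IPv4 case above, iph->check is
                 * zeroed and iph->tot_len rewritten to the per-segment length
                 * so the hardware can regenerate them for each TSO segment,
                 * and the combined IP+TCP header length is folded into the
                 * upper bits of the mss word (<< 9).  The TCP checksum is
                 * cleared just below for the same reason.
                 */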
4067
4068                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4069                                TXD_FLAG_CPU_POST_DMA);
4070
4071                 tcp_hdr(skb)->check = 0;
4072
4073         }
4074         else if (skb->ip_summed == CHECKSUM_PARTIAL)
4075                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4076 #if TG3_VLAN_TAG_USED
4077         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4078                 base_flags |= (TXD_FLAG_VLAN |
4079                                (vlan_tx_tag_get(skb) << 16));
4080 #endif
4081
4082         /* Queue skb data, a.k.a. the main skb fragment. */
4083         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4084
4085         tp->tx_buffers[entry].skb = skb;
4086         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4087
4088         tg3_set_txd(tp, entry, mapping, len, base_flags,
4089                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4090
4091         entry = NEXT_TX(entry);
4092
4093         /* Now loop through additional data fragments, and queue them. */
4094         if (skb_shinfo(skb)->nr_frags > 0) {
4095                 unsigned int i, last;
4096
4097                 last = skb_shinfo(skb)->nr_frags - 1;
4098                 for (i = 0; i <= last; i++) {
4099                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4100
4101                         len = frag->size;
4102                         mapping = pci_map_page(tp->pdev,
4103                                                frag->page,
4104                                                frag->page_offset,
4105                                                len, PCI_DMA_TODEVICE);
4106
4107                         tp->tx_buffers[entry].skb = NULL;
4108                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4109
4110                         tg3_set_txd(tp, entry, mapping, len,
4111                                     base_flags, (i == last) | (mss << 1));
4112
4113                         entry = NEXT_TX(entry);
4114                 }
4115         }
4116
4117         /* Packets are ready, update Tx producer idx local and on card. */
4118         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4119
4120         tp->tx_prod = entry;
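             /* If a maximally fragmented skb might no longer fit, stop the
              * queue, then re-check in case the reclaim path has already
              * freed enough descriptors and wake it again.
              */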
4121         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4122                 netif_stop_queue(dev);
4123                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4124                         netif_wake_queue(tp->dev);
4125         }
4126
4127 out_unlock:
4128         mmiowb();
4129
4130         dev->trans_start = jiffies;
4131
4132         return NETDEV_TX_OK;
4133 }
4134
4135 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4136
4137 /* Use GSO to work around a rare TSO bug that may be triggered when the
4138  * TSO header is greater than 80 bytes.
4139  */
4140 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4141 {
4142         struct sk_buff *segs, *nskb;
4143
4144         /* Estimate the number of fragments in the worst case */
4145         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4146                 netif_stop_queue(tp->dev);
4147                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4148                         return NETDEV_TX_BUSY;
4149
4150                 netif_wake_queue(tp->dev);
4151         }
4152
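             /* Segment the skb in software with TSO masked out of the feature
              * flags; each resulting packet is then transmitted as an ordinary
              * non-TSO frame, bypassing the hardware TSO engine entirely.
              */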
4153         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4154         if (unlikely(IS_ERR(segs)))
4155                 goto tg3_tso_bug_end;
4156
4157         do {
4158                 nskb = segs;
4159                 segs = segs->next;
4160                 nskb->next = NULL;
4161                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4162         } while (segs);
4163
4164 tg3_tso_bug_end:
4165         dev_kfree_skb(skb);
4166
4167         return NETDEV_TX_OK;
4168 }
4169
4170 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4171  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4172  */
4173 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4174 {
4175         struct tg3 *tp = netdev_priv(dev);
4176         dma_addr_t mapping;
4177         u32 len, entry, base_flags, mss;
4178         int would_hit_hwbug;
4179
4180         len = skb_headlen(skb);
4181
4182         /* We are running in BH disabled context with netif_tx_lock
4183          * and TX reclaim runs via tp->napi.poll inside of a software
4184          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4185          * no IRQ context deadlocks to worry about either.  Rejoice!
4186          */
4187         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4188                 if (!netif_queue_stopped(dev)) {
4189                         netif_stop_queue(dev);
4190
4191                         /* This is a hard error, log it. */
4192                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4193                                "queue awake!\n", dev->name);
4194                 }
4195                 return NETDEV_TX_BUSY;
4196         }
4197
4198         entry = tp->tx_prod;
4199         base_flags = 0;
4200         if (skb->ip_summed == CHECKSUM_PARTIAL)
4201                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4202         mss = 0;
4203         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4204                 struct iphdr *iph;
4205                 int tcp_opt_len, ip_tcp_len, hdr_len;
4206
4207                 if (skb_header_cloned(skb) &&
4208                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4209                         dev_kfree_skb(skb);
4210                         goto out_unlock;
4211                 }
4212
4213                 tcp_opt_len = tcp_optlen(skb);
4214                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4215
4216                 hdr_len = ip_tcp_len + tcp_opt_len;
4217                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4218                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4219                         return (tg3_tso_bug(tp, skb));
4220
4221                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4222                                TXD_FLAG_CPU_POST_DMA);
4223
4224                 iph = ip_hdr(skb);
4225                 iph->check = 0;
4226                 iph->tot_len = htons(mss + hdr_len);
4227                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4228                         tcp_hdr(skb)->check = 0;
4229                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4230                 } else
4231                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4232                                                                  iph->daddr, 0,
4233                                                                  IPPROTO_TCP,
4234                                                                  0);
4235
4236                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4237                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4238                         if (tcp_opt_len || iph->ihl > 5) {
4239                                 int tsflags;
4240
4241                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4242                                 mss |= (tsflags << 11);
4243                         }
4244                 } else {
4245                         if (tcp_opt_len || iph->ihl > 5) {
4246                                 int tsflags;
4247
4248                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4249                                 base_flags |= tsflags << 12;
4250                         }
4251                 }
4252         }
4253 #if TG3_VLAN_TAG_USED
4254         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4255                 base_flags |= (TXD_FLAG_VLAN |
4256                                (vlan_tx_tag_get(skb) << 16));
4257 #endif
4258
4259         /* Queue skb data, a.k.a. the main skb fragment. */
4260         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4261
4262         tp->tx_buffers[entry].skb = skb;
4263         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4264
4265         would_hit_hwbug = 0;
4266
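             /* Buffers that cross a 4GB boundary (or, on 40-bit parts, exceed
              * the 40-bit DMA limit) trip a hardware bug, so flag any such
              * mapping and bounce the packet through the workaround path
              * below.
              */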
4267         if (tg3_4g_overflow_test(mapping, len))
4268                 would_hit_hwbug = 1;
4269
4270         tg3_set_txd(tp, entry, mapping, len, base_flags,
4271                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4272
4273         entry = NEXT_TX(entry);
4274
4275         /* Now loop through additional data fragments, and queue them. */
4276         if (skb_shinfo(skb)->nr_frags > 0) {
4277                 unsigned int i, last;
4278
4279                 last = skb_shinfo(skb)->nr_frags - 1;
4280                 for (i = 0; i <= last; i++) {
4281                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4282
4283                         len = frag->size;
4284                         mapping = pci_map_page(tp->pdev,
4285                                                frag->page,
4286                                                frag->page_offset,
4287                                                len, PCI_DMA_TODEVICE);
4288
4289                         tp->tx_buffers[entry].skb = NULL;
4290                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4291
4292                         if (tg3_4g_overflow_test(mapping, len))
4293                                 would_hit_hwbug = 1;
4294
4295                         if (tg3_40bit_overflow_test(tp, mapping, len))
4296                                 would_hit_hwbug = 1;
4297
4298                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4299                                 tg3_set_txd(tp, entry, mapping, len,
4300                                             base_flags, (i == last)|(mss << 1));
4301                         else
4302                                 tg3_set_txd(tp, entry, mapping, len,
4303                                             base_flags, (i == last));
4304
4305                         entry = NEXT_TX(entry);
4306                 }
4307         }
4308
4309         if (would_hit_hwbug) {
4310                 u32 last_plus_one = entry;
4311                 u32 start;
4312
4313                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4314                 start &= (TG3_TX_RING_SIZE - 1);
4315
4316                 /* If the workaround fails due to memory/mapping
4317                  * failure, silently drop this packet.
4318                  */
4319                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4320                                                 &start, base_flags, mss))
4321                         goto out_unlock;
4322
4323                 entry = start;
4324         }
4325
4326         /* Packets are ready, update Tx producer idx local and on card. */
4327         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4328
4329         tp->tx_prod = entry;
4330         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4331                 netif_stop_queue(dev);
4332                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4333                         netif_wake_queue(tp->dev);
4334         }
4335
4336 out_unlock:
4337         mmiowb();
4338
4339         dev->trans_start = jiffies;
4340
4341         return NETDEV_TX_OK;
4342 }
4343
4344 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4345                                int new_mtu)
4346 {
4347         dev->mtu = new_mtu;
4348
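             /* 5780-class chips give up TSO when running with a jumbo MTU;
              * other chips enable the dedicated jumbo RX ring instead.
              */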
4349         if (new_mtu > ETH_DATA_LEN) {
4350                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4351                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4352                         ethtool_op_set_tso(dev, 0);
4353                 }
4354                 else
4355                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4356         } else {
4357                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4358                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4359                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4360         }
4361 }
4362
4363 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4364 {
4365         struct tg3 *tp = netdev_priv(dev);
4366         int err;
4367
4368         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4369                 return -EINVAL;
4370
4371         if (!netif_running(dev)) {
4372                 /* We'll just catch it later when the
4373                  * device is brought up.
4374                  */
4375                 tg3_set_mtu(dev, tp, new_mtu);
4376                 return 0;
4377         }
4378
4379         tg3_netif_stop(tp);
4380
4381         tg3_full_lock(tp, 1);
4382
4383         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4384
4385         tg3_set_mtu(dev, tp, new_mtu);
4386
4387         err = tg3_restart_hw(tp, 0);
4388
4389         if (!err)
4390                 tg3_netif_start(tp);
4391
4392         tg3_full_unlock(tp);
4393
4394         return err;
4395 }
4396
4397 /* Free up pending packets in all rx/tx rings.
4398  *
4399  * The chip has been shut down and the driver detached from
4400  * the networking stack, so no interrupts or new tx packets will
4401  * end up in the driver.  tp->{tx,}lock is not held and we are not
4402  * in an interrupt context and thus may sleep.
4403  */
4404 static void tg3_free_rings(struct tg3 *tp)
4405 {
4406         struct ring_info *rxp;
4407         int i;
4408
4409         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4410                 rxp = &tp->rx_std_buffers[i];
4411
4412                 if (rxp->skb == NULL)
4413                         continue;
4414                 pci_unmap_single(tp->pdev,
4415                                  pci_unmap_addr(rxp, mapping),
4416                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4417                                  PCI_DMA_FROMDEVICE);
4418                 dev_kfree_skb_any(rxp->skb);
4419                 rxp->skb = NULL;
4420         }
4421
4422         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4423                 rxp = &tp->rx_jumbo_buffers[i];
4424
4425                 if (rxp->skb == NULL)
4426                         continue;
4427                 pci_unmap_single(tp->pdev,
4428                                  pci_unmap_addr(rxp, mapping),
4429                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4430                                  PCI_DMA_FROMDEVICE);
4431                 dev_kfree_skb_any(rxp->skb);
4432                 rxp->skb = NULL;
4433         }
4434
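             /* Each transmitted skb uses one descriptor for its linear head
              * plus one per page fragment; unmap the head with
              * pci_unmap_single() and every fragment with pci_unmap_page()
              * before the skb itself is freed.
              */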
4435         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4436                 struct tx_ring_info *txp;
4437                 struct sk_buff *skb;
4438                 int j;
4439
4440                 txp = &tp->tx_buffers[i];
4441                 skb = txp->skb;
4442
4443                 if (skb == NULL) {
4444                         i++;
4445                         continue;
4446                 }
4447
4448                 pci_unmap_single(tp->pdev,
4449                                  pci_unmap_addr(txp, mapping),
4450                                  skb_headlen(skb),
4451                                  PCI_DMA_TODEVICE);
4452                 txp->skb = NULL;
4453
4454                 i++;
4455
4456                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4457                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4458                         pci_unmap_page(tp->pdev,
4459                                        pci_unmap_addr(txp, mapping),
4460                                        skb_shinfo(skb)->frags[j].size,
4461                                        PCI_DMA_TODEVICE);
4462                         i++;
4463                 }
4464
4465                 dev_kfree_skb_any(skb);
4466         }
4467 }
4468
4469 /* Initialize tx/rx rings for packet processing.
4470  *
4471  * The chip has been shut down and the driver detached from
4472  * the networking stack, so no interrupts or new tx packets will
4473  * end up in the driver.  tp->{tx,}lock are held and thus
4474  * we may not sleep.
4475  */
4476 static int tg3_init_rings(struct tg3 *tp)
4477 {
4478         u32 i;
4479
4480         /* Free up all the SKBs. */
4481         tg3_free_rings(tp);
4482
4483         /* Zero out all descriptors. */
4484         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4485         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4486         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4487         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4488
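             /* 5780-class chips do not use the separate jumbo ring (see
              * tg3_set_mtu() above); with a jumbo MTU they simply post
              * jumbo-sized buffers on the standard RX ring.
              */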
4489         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4490         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4491             (tp->dev->mtu > ETH_DATA_LEN))
4492                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4493
4494         /* Initialize invariants of the rings; we only set this
4495          * stuff once.  This works because the card does not
4496          * write into the rx buffer posting rings.
4497          */
4498         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4499                 struct tg3_rx_buffer_desc *rxd;
4500
4501                 rxd = &tp->rx_std[i];
4502                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4503                         << RXD_LEN_SHIFT;
4504                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4505                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4506                                (i << RXD_OPAQUE_INDEX_SHIFT));
4507         }
4508
4509         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4510                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4511                         struct tg3_rx_buffer_desc *rxd;
4512
4513                         rxd = &tp->rx_jumbo[i];
4514                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4515                                 << RXD_LEN_SHIFT;
4516                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4517                                 RXD_FLAG_JUMBO;
4518                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4519                                (i << RXD_OPAQUE_INDEX_SHIFT));
4520                 }
4521         }
4522
4523         /* Now allocate fresh SKBs for each rx ring. */
4524         for (i = 0; i < tp->rx_pending; i++) {
4525                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4526                         printk(KERN_WARNING PFX
4527                                "%s: Using a smaller RX standard ring, "
4528                                "only %d out of %d buffers were allocated "
4529                                "successfully.\n",
4530                                tp->dev->name, i, tp->rx_pending);
4531                         if (i == 0)
4532                                 return -ENOMEM;
4533                         tp->rx_pending = i;
4534                         break;
4535                 }
4536         }
4537
4538         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4539                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4540                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4541                                              -1, i) < 0) {
4542                                 printk(KERN_WARNING PFX
4543                                        "%s: Using a smaller RX jumbo ring, "
4544                                        "only %d out of %d buffers were "
4545                                        "allocated successfully.\n",
4546                                        tp->dev->name, i, tp->rx_jumbo_pending);
4547                                 if (i == 0) {
4548                                         tg3_free_rings(tp);
4549                                         return -ENOMEM;
4550                                 }
4551                                 tp->rx_jumbo_pending = i;
4552                                 break;
4553                         }
4554                 }
4555         }
4556         return 0;
4557 }
4558
4559 /*
4560  * Must not be invoked with interrupt sources disabled and
4561  * the hardware shut down.
4562  */
4563 static void tg3_free_consistent(struct tg3 *tp)
4564 {
4565         kfree(tp->rx_std_buffers);
4566         tp->rx_std_buffers = NULL;
4567         if (tp->rx_std) {
4568                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4569                                     tp->rx_std, tp->rx_std_mapping);
4570                 tp->rx_std = NULL;
4571         }
4572         if (tp->rx_jumbo) {
4573                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4574                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4575                 tp->rx_jumbo = NULL;
4576         }
4577         if (tp->rx_rcb) {
4578                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4579                                     tp->rx_rcb, tp->rx_rcb_mapping);
4580                 tp->rx_rcb = NULL;
4581         }
4582         if (tp->tx_ring) {
4583                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4584                         tp->tx_ring, tp->tx_desc_mapping);
4585                 tp->tx_ring = NULL;
4586         }
4587         if (tp->hw_status) {
4588                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4589                                     tp->hw_status, tp->status_mapping);
4590                 tp->hw_status = NULL;
4591         }
4592         if (tp->hw_stats) {
4593                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4594                                     tp->hw_stats, tp->stats_mapping);
4595                 tp->hw_stats = NULL;
4596         }
4597 }
4598
4599 /*
4600  * Must not be invoked with interrupt sources disabled and
4601  * the hardware shut down.  Can sleep.
4602  */
4603 static int tg3_alloc_consistent(struct tg3 *tp)
4604 {
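             /* The standard RX, jumbo RX and TX bookkeeping arrays share a
              * single kzalloc() block; the jumbo RX and TX pointers are carved
              * out of it just below.
              */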
4605         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4606                                       (TG3_RX_RING_SIZE +
4607                                        TG3_RX_JUMBO_RING_SIZE)) +
4608                                      (sizeof(struct tx_ring_info) *
4609                                       TG3_TX_RING_SIZE),
4610                                      GFP_KERNEL);
4611         if (!tp->rx_std_buffers)
4612                 return -ENOMEM;
4613
4614         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4615         tp->tx_buffers = (struct tx_ring_info *)
4616                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4617
4618         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4619                                           &tp->rx_std_mapping);
4620         if (!tp->rx_std)
4621                 goto err_out;
4622
4623         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4624                                             &tp->rx_jumbo_mapping);
4625
4626         if (!tp->rx_jumbo)
4627                 goto err_out;
4628
4629         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4630                                           &tp->rx_rcb_mapping);
4631         if (!tp->rx_rcb)
4632                 goto err_out;
4633
4634         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4635                                            &tp->tx_desc_mapping);
4636         if (!tp->tx_ring)
4637                 goto err_out;
4638
4639         tp->hw_status = pci_alloc_consistent(tp->pdev,
4640                                              TG3_HW_STATUS_SIZE,
4641                                              &tp->status_mapping);
4642         if (!tp->hw_status)
4643                 goto err_out;
4644
4645         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4646                                             sizeof(struct tg3_hw_stats),
4647                                             &tp->stats_mapping);
4648         if (!tp->hw_stats)
4649                 goto err_out;
4650
4651         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4652         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4653
4654         return 0;
4655
4656 err_out:
4657         tg3_free_consistent(tp);
4658         return -ENOMEM;
4659 }
4660
4661 #define MAX_WAIT_CNT 1000
4662
4663 /* To stop a block, clear the enable bit and poll till it
4664  * clears.  tp->lock is held.
4665  */
4666 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4667 {
4668         unsigned int i;
4669         u32 val;
4670
4671         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4672                 switch (ofs) {
4673                 case RCVLSC_MODE:
4674                 case DMAC_MODE:
4675                 case MBFREE_MODE:
4676                 case BUFMGR_MODE:
4677                 case MEMARB_MODE:
4678                         /* We can't enable/disable these bits of the
4679                          * 5705/5750, just say success.
4680                          */
4681                         return 0;
4682
4683                 default:
4684                         break;
4685                 }
4686         }
4687
4688         val = tr32(ofs);
4689         val &= ~enable_bit;
4690         tw32_f(ofs, val);
4691
4692         for (i = 0; i < MAX_WAIT_CNT; i++) {
4693                 udelay(100);
4694                 val = tr32(ofs);
4695                 if ((val & enable_bit) == 0)
4696                         break;
4697         }
4698
4699         if (i == MAX_WAIT_CNT && !silent) {
4700                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4701                        "ofs=%lx enable_bit=%x\n",
4702                        ofs, enable_bit);
4703                 return -ENODEV;
4704         }
4705
4706         return 0;
4707 }
4708
4709 /* tp->lock is held. */
4710 static int tg3_abort_hw(struct tg3 *tp, int silent)
4711 {
4712         int i, err;
4713
4714         tg3_disable_ints(tp);
4715
4716         tp->rx_mode &= ~RX_MODE_ENABLE;
4717         tw32_f(MAC_RX_MODE, tp->rx_mode);
4718         udelay(10);
4719
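             /* Quiesce the receive path first, then the transmit and DMA
              * blocks, OR-ing any stop failures into err.
              */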
4720         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4721         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4722         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4723         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4724         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4725         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4726
4727         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4728         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4729         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4730         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4731         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4732         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4733         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4734
4735         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4736         tw32_f(MAC_MODE, tp->mac_mode);
4737         udelay(40);
4738
4739         tp->tx_mode &= ~TX_MODE_ENABLE;
4740         tw32_f(MAC_TX_MODE, tp->tx_mode);
4741
4742         for (i = 0; i < MAX_WAIT_CNT; i++) {
4743                 udelay(100);
4744                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4745                         break;
4746         }
4747         if (i >= MAX_WAIT_CNT) {
4748                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4749                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4750                        tp->dev->name, tr32(MAC_TX_MODE));
4751                 err |= -ENODEV;
4752         }
4753
4754         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4755         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4756         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4757
4758         tw32(FTQ_RESET, 0xffffffff);
4759         tw32(FTQ_RESET, 0x00000000);
4760
4761         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4762         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4763
4764         if (tp->hw_status)
4765                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4766         if (tp->hw_stats)
4767                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4768
4769         return err;
4770 }
4771
4772 /* tp->lock is held. */
4773 static int tg3_nvram_lock(struct tg3 *tp)
4774 {
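             /* NVRAM access is arbitrated through the NVRAM_SWARB register;
              * nvram_lock_cnt turns this into a nesting lock so paths that
              * already hold it can lock again safely.
              */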
4775         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4776                 int i;
4777
4778                 if (tp->nvram_lock_cnt == 0) {
4779                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4780                         for (i = 0; i < 8000; i++) {
4781                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4782                                         break;
4783                                 udelay(20);
4784                         }
4785                         if (i == 8000) {
4786                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4787                                 return -ENODEV;
4788                         }
4789                 }
4790                 tp->nvram_lock_cnt++;
4791         }
4792         return 0;
4793 }
4794
4795 /* tp->lock is held. */
4796 static void tg3_nvram_unlock(struct tg3 *tp)
4797 {
4798         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4799                 if (tp->nvram_lock_cnt > 0)
4800                         tp->nvram_lock_cnt--;
4801                 if (tp->nvram_lock_cnt == 0)
4802                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4803         }
4804 }
4805
4806 /* tp->lock is held. */
4807 static void tg3_enable_nvram_access(struct tg3 *tp)
4808 {
4809         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4810             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4811                 u32 nvaccess = tr32(NVRAM_ACCESS);
4812
4813                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4814         }
4815 }
4816
4817 /* tp->lock is held. */
4818 static void tg3_disable_nvram_access(struct tg3 *tp)
4819 {
4820         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4821             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4822                 u32 nvaccess = tr32(NVRAM_ACCESS);
4823
4824                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4825         }
4826 }
4827
4828 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
4829 {
4830         int i;
4831         u32 apedata;
4832
4833         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
4834         if (apedata != APE_SEG_SIG_MAGIC)
4835                 return;
4836
4837         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
4838         if (apedata != APE_FW_STATUS_READY)
4839                 return;
4840
4841         /* Wait for up to 1 millisecond for APE to service previous event. */
4842         for (i = 0; i < 10; i++) {
4843                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
4844                         return;
4845
4846                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
4847
4848                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4849                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
4850                                         event | APE_EVENT_STATUS_EVENT_PENDING);
4851
4852                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
4853
4854                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4855                         break;
4856
4857                 udelay(100);
4858         }
4859
4860         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4861                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
4862 }
4863
4864 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4865 {
4866         u32 event;
4867         u32 apedata;
4868
4869         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4870                 return;
4871
4872         switch (kind) {
4873                 case RESET_KIND_INIT:
4874                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4875                                         APE_HOST_SEG_SIG_MAGIC);
4876                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4877                                         APE_HOST_SEG_LEN_MAGIC);
4878                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4879                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4880                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4881                                         APE_HOST_DRIVER_ID_MAGIC);
4882                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4883                                         APE_HOST_BEHAV_NO_PHYLOCK);
4884
4885                         event = APE_EVENT_STATUS_STATE_START;
4886                         break;
4887                 case RESET_KIND_SHUTDOWN:
4888                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4889                         break;
4890                 case RESET_KIND_SUSPEND:
4891                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4892                         break;
4893                 default:
4894                         return;
4895         }
4896
4897         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4898
4899         tg3_ape_send_event(tp, event);
4900 }
4901
4902 /* tp->lock is held. */
4903 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4904 {
4905         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4906                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4907
4908         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4909                 switch (kind) {
4910                 case RESET_KIND_INIT:
4911                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4912                                       DRV_STATE_START);
4913                         break;
4914
4915                 case RESET_KIND_SHUTDOWN:
4916                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4917                                       DRV_STATE_UNLOAD);
4918                         break;
4919
4920                 case RESET_KIND_SUSPEND:
4921                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4922                                       DRV_STATE_SUSPEND);
4923                         break;
4924
4925                 default:
4926                         break;
4927                 }
4928         }
4929
4930         if (kind == RESET_KIND_INIT ||
4931             kind == RESET_KIND_SUSPEND)
4932                 tg3_ape_driver_state_change(tp, kind);
4933 }
4934
4935 /* tp->lock is held. */
4936 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4937 {
4938         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4939                 switch (kind) {
4940                 case RESET_KIND_INIT:
4941                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4942                                       DRV_STATE_START_DONE);
4943                         break;
4944
4945                 case RESET_KIND_SHUTDOWN:
4946                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4947                                       DRV_STATE_UNLOAD_DONE);
4948                         break;
4949
4950                 default:
4951                         break;
4952                 }
4953         }
4954
4955         if (kind == RESET_KIND_SHUTDOWN)
4956                 tg3_ape_driver_state_change(tp, kind);
4957 }
4958
4959 /* tp->lock is held. */
4960 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4961 {
4962         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4963                 switch (kind) {
4964                 case RESET_KIND_INIT:
4965                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4966                                       DRV_STATE_START);
4967                         break;
4968
4969                 case RESET_KIND_SHUTDOWN:
4970                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4971                                       DRV_STATE_UNLOAD);
4972                         break;
4973
4974                 case RESET_KIND_SUSPEND:
4975                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4976                                       DRV_STATE_SUSPEND);
4977                         break;
4978
4979                 default:
4980                         break;
4981                 }
4982         }
4983 }
4984
4985 static int tg3_poll_fw(struct tg3 *tp)
4986 {
4987         int i;
4988         u32 val;
4989
4990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4991                 /* Wait up to 20ms for init done. */
4992                 for (i = 0; i < 200; i++) {
4993                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4994                                 return 0;
4995                         udelay(100);
4996                 }
4997                 return -ENODEV;
4998         }
4999
5000         /* Wait for firmware initialization to complete. */
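             /* tg3_write_sig_pre_reset() stored NIC_SRAM_FIRMWARE_MBOX_MAGIC1
              * in this mailbox before the reset; the firmware indicates it has
              * finished initializing by writing back the one's complement of
              * that value.
              */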
5001         for (i = 0; i < 100000; i++) {
5002                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5003                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5004                         break;
5005                 udelay(10);
5006         }
5007
5008         /* Chip might not be fitted with firmware.  Some Sun onboard
5009          * parts are configured like that.  So don't signal the timeout
5010          * of the above loop as an error, but do report the lack of
5011          * running firmware once.
5012          */
5013         if (i >= 100000 &&
5014             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5015                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5016
5017                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5018                        tp->dev->name);
5019         }
5020
5021         return 0;
5022 }
5023
5024 /* Save PCI command register before chip reset */
5025 static void tg3_save_pci_state(struct tg3 *tp)
5026 {
5027         u32 val;
5028
5029         pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5030         tp->pci_cmd = val;
5031 }
5032
5033 /* Restore PCI state after chip reset */
5034 static void tg3_restore_pci_state(struct tg3 *tp)
5035 {
5036         u32 val;
5037
5038         /* Re-enable indirect register accesses. */
5039         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5040                                tp->misc_host_ctrl);
5041
5042         /* Set MAX PCI retry to zero. */
5043         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5044         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5045             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5046                 val |= PCISTATE_RETRY_SAME_DMA;
5047         /* Allow reads and writes to the APE register and memory space. */
5048         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5049                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5050                        PCISTATE_ALLOW_APE_SHMEM_WR;
5051         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5052
5053         pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);
5054
5055         /* Make sure PCI-X relaxed ordering bit is clear. */
5056         if (tp->pcix_cap) {
5057                 u16 pcix_cmd;
5058
5059                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5060                                      &pcix_cmd);
5061                 pcix_cmd &= ~PCI_X_CMD_ERO;
5062                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5063                                       pcix_cmd);
5064         }
5065
5066         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5067
5068                 /* Chip reset on 5780 will reset MSI enable bit,
5069                  * so we need to restore it.
5070                  */
5071                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5072                         u16 ctrl;
5073
5074                         pci_read_config_word(tp->pdev,
5075                                              tp->msi_cap + PCI_MSI_FLAGS,
5076                                              &ctrl);
5077                         pci_write_config_word(tp->pdev,
5078                                               tp->msi_cap + PCI_MSI_FLAGS,
5079                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5080                         val = tr32(MSGINT_MODE);
5081                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5082                 }
5083         }
5084 }
5085
5086 static void tg3_stop_fw(struct tg3 *);
5087
5088 /* tp->lock is held. */
5089 static int tg3_chip_reset(struct tg3 *tp)
5090 {
5091         u32 val;
5092         void (*write_op)(struct tg3 *, u32, u32);
5093         int err;
5094
5095         tg3_nvram_lock(tp);
5096
5097         /* No matching tg3_nvram_unlock() after this because
5098          * chip reset below will undo the nvram lock.
5099          */
5100         tp->nvram_lock_cnt = 0;
5101
5102         /* GRC_MISC_CFG core clock reset will clear the memory
5103          * enable bit in PCI register 4 and the MSI enable bit
5104          * on some chips, so we save relevant registers here.
5105          */
5106         tg3_save_pci_state(tp);
5107
5108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5109             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5110             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5111             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5112             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5113                 tw32(GRC_FASTBOOT_PC, 0);
5114
5115         /*
5116          * We must avoid the readl() that normally takes place.
5117          * It can lock up machines, cause machine checks, and do other
5118          * fun things.  So, temporarily disable the 5701
5119          * hardware workaround while we do the reset.
5120          */
5121         write_op = tp->write32;
5122         if (write_op == tg3_write_flush_reg32)
5123                 tp->write32 = tg3_write32;
5124
5125         /* Prevent the irq handler from reading or writing PCI registers
5126          * during chip reset when the memory enable bit in the PCI command
5127          * register may be cleared.  The chip does not generate interrupts
5128          * at this time, but the irq handler may still be called due to irq
5129          * sharing or irqpoll.
5130          */
5131         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5132         if (tp->hw_status) {
5133                 tp->hw_status->status = 0;
5134                 tp->hw_status->status_tag = 0;
5135         }
5136         tp->last_tag = 0;
5137         smp_mb();
5138         synchronize_irq(tp->pdev->irq);
5139
5140         /* do the reset */
5141         val = GRC_MISC_CFG_CORECLK_RESET;
5142
5143         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5144                 if (tr32(0x7e2c) == 0x60) {
5145                         tw32(0x7e2c, 0x20);
5146                 }
5147                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5148                         tw32(GRC_MISC_CFG, (1 << 29));
5149                         val |= (1 << 29);
5150                 }
5151         }
5152
5153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5154                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5155                 tw32(GRC_VCPU_EXT_CTRL,
5156                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5157         }
5158
5159         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5160                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5161         tw32(GRC_MISC_CFG, val);
5162
5163         /* restore 5701 hardware bug workaround write method */
5164         tp->write32 = write_op;
5165
5166         /* Unfortunately, we have to delay before the PCI read back.
5167          * Some 575X chips will not even respond to a PCI cfg access
5168          * when the reset command is given to the chip.
5169          *
5170          * How do these hardware designers expect things to work
5171          * properly if the PCI write is posted for a long period
5172          * of time?  It is always necessary to have some method by
5173          * which a register read back can occur to push the write
5174          * which a register read back can occur to push out the write
5175          * that does the reset.
5176          * For most tg3 variants the trick below was working.
5177          * Ho hum...
5178          */
5179         udelay(120);
5180
5181         /* Flush PCI posted writes.  The normal MMIO registers
5182          * are inaccessible at this time so this is the only
5183          * way to do this reliably (actually, this is no longer
5184          * the case, see above).  I tried to use indirect
5185          * register read/write but this upset some 5701 variants.
5186          */
5187         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5188
5189         udelay(120);
5190
5191         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5192                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5193                         int i;
5194                         u32 cfg_val;
5195
5196                         /* Wait for link training to complete.  */
5197                         for (i = 0; i < 5000; i++)
5198                                 udelay(100);
5199
5200                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5201                         pci_write_config_dword(tp->pdev, 0xc4,
5202                                                cfg_val | (1 << 15));
5203                 }
5204                 /* Set PCIE max payload size and clear error status.  */
5205                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5206         }
5207
5208         tg3_restore_pci_state(tp);
5209
5210         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5211
5212         val = 0;
5213         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5214                 val = tr32(MEMARB_MODE);
5215         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5216
5217         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5218                 tg3_stop_fw(tp);
5219                 tw32(0x5000, 0x400);
5220         }
5221
5222         tw32(GRC_MODE, tp->grc_mode);
5223
5224         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5225                 val = tr32(0xc4);
5226
5227                 tw32(0xc4, val | (1 << 15));
5228         }
5229
5230         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5231             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5232                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5233                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5234                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5235                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5236         }
5237
5238         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5239                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5240                 tw32_f(MAC_MODE, tp->mac_mode);
5241         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5242                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5243                 tw32_f(MAC_MODE, tp->mac_mode);
5244         } else
5245                 tw32_f(MAC_MODE, 0);
5246         udelay(40);
5247
5248         err = tg3_poll_fw(tp);
5249         if (err)
5250                 return err;
5251
5252         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5253             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5254                 val = tr32(0x7c00);
5255
5256                 tw32(0x7c00, val | (1 << 25));
5257         }
5258
5259         /* Reprobe ASF enable state.  */
5260         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5261         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5262         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5263         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5264                 u32 nic_cfg;
5265
5266                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5267                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5268                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5269                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5270                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5271                 }
5272         }
5273
5274         return 0;
5275 }
5276
5277 /* tp->lock is held. */
5278 static void tg3_stop_fw(struct tg3 *tp)
5279 {
5280         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5281            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5282                 u32 val;
5283                 int i;
5284
5285                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5286                 val = tr32(GRC_RX_CPU_EVENT);
5287                 val |= (1 << 14);
5288                 tw32(GRC_RX_CPU_EVENT, val);
5289
5290                 /* Wait for RX cpu to ACK the event.  */
5291                 for (i = 0; i < 100; i++) {
5292                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5293                                 break;
5294                         udelay(1);
5295                 }
5296         }
5297 }
5298
5299 /* tp->lock is held. */
5300 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5301 {
5302         int err;
5303
5304         tg3_stop_fw(tp);
5305
5306         tg3_write_sig_pre_reset(tp, kind);
5307
5308         tg3_abort_hw(tp, silent);
5309         err = tg3_chip_reset(tp);
5310
5311         tg3_write_sig_legacy(tp, kind);
5312         tg3_write_sig_post_reset(tp, kind);
5313
5314         if (err)
5315                 return err;
5316
5317         return 0;
5318 }
5319
5320 #define TG3_FW_RELEASE_MAJOR    0x0
5321 #define TG3_FW_RELASE_MINOR     0x0
5322 #define TG3_FW_RELEASE_FIX      0x0
5323 #define TG3_FW_START_ADDR       0x08000000
5324 #define TG3_FW_TEXT_ADDR        0x08000000
5325 #define TG3_FW_TEXT_LEN         0x9c0
5326 #define TG3_FW_RODATA_ADDR      0x080009c0
5327 #define TG3_FW_RODATA_LEN       0x60
5328 #define TG3_FW_DATA_ADDR        0x08000a40
5329 #define TG3_FW_DATA_LEN         0x20
5330 #define TG3_FW_SBSS_ADDR        0x08000a60
5331 #define TG3_FW_SBSS_LEN         0xc
5332 #define TG3_FW_BSS_ADDR         0x08000a70
5333 #define TG3_FW_BSS_LEN          0x10
5334
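     /* Firmware text segment, stored as raw 32-bit words (these appear to be
      * MIPS instructions), to be copied into on-chip scratch memory starting
      * at TG3_FW_TEXT_ADDR by the firmware loading code below.
      */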
5335 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5336         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5337         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5338         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5339         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5340         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5341         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5342         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5343         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5344         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5345         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5346         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5347         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5348         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5349         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5350         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5351         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5352         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5353         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5354         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5355         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5356         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5357         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5358         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5359         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5360         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5361         0, 0, 0, 0, 0, 0,
5362         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5363         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5364         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5365         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5366         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5367         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5368         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5369         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5370         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5371         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5372         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5373         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5374         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5375         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5376         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5377         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5378         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5379         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5380         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5381         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5382         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5383         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5384         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5385         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5386         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5387         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5388         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5389         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5390         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5391         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5392         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5393         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5394         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5395         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5396         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5397         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5398         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5399         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5400         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5401         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5402         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5403         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5404         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5405         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5406         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5407         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5408         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5409         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5410         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5411         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5412         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5413         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5414         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5415         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5416         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5417         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5418         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5419         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5420         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5421         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5422         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5423         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5424         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5425         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5426         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5427 };
5428
5429 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5430         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5431         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5432         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5433         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5434         0x00000000
5435 };
5436
5437 #if 0 /* All zeros, don't eat up space with it. */
5438 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5439         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5440         0x00000000, 0x00000000, 0x00000000, 0x00000000
5441 };
5442 #endif
5443
5444 #define RX_CPU_SCRATCH_BASE     0x30000
5445 #define RX_CPU_SCRATCH_SIZE     0x04000
5446 #define TX_CPU_SCRATCH_BASE     0x34000
5447 #define TX_CPU_SCRATCH_SIZE     0x04000
5448
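/* Halt the RX or TX on-chip CPU.  On 5906 parts the internal CPU is
 * stopped through the VCPU halt bit instead.  Otherwise the halt bit is
 * written and polled (up to 10000 iterations) until the CPU reports it
 * is halted; the RX CPU additionally gets one final flushed halt write.
 * On success the firmware's NVRAM arbitration request is cleared.
 */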
5449 /* tp->lock is held. */
5450 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5451 {
5452         int i;
5453
5454         BUG_ON(offset == TX_CPU_BASE &&
5455             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5456
5457         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5458                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5459
5460                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5461                 return 0;
5462         }
5463         if (offset == RX_CPU_BASE) {
5464                 for (i = 0; i < 10000; i++) {
5465                         tw32(offset + CPU_STATE, 0xffffffff);
5466                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5467                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5468                                 break;
5469                 }
5470
5471                 tw32(offset + CPU_STATE, 0xffffffff);
5472                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5473                 udelay(10);
5474         } else {
5475                 for (i = 0; i < 10000; i++) {
5476                         tw32(offset + CPU_STATE, 0xffffffff);
5477                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5478                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5479                                 break;
5480                 }
5481         }
5482
5483         if (i >= 10000) {
5484                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
5485                        "%s CPU\n",
5486                        tp->dev->name,
5487                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5488                 return -ENODEV;
5489         }
5490
5491         /* Clear firmware's nvram arbitration. */
5492         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5493                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5494         return 0;
5495 }
5496
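/* Describes one firmware image to be loaded into a CPU scratch area.
 * Each section (text, read-only data, data) has a link-time load
 * address, a length in bytes and a pointer to its words; a NULL data
 * pointer makes the loader zero-fill that section.
 */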
5497 struct fw_info {
5498         unsigned int text_base;
5499         unsigned int text_len;
5500         const u32 *text_data;
5501         unsigned int rodata_base;
5502         unsigned int rodata_len;
5503         const u32 *rodata_data;
5504         unsigned int data_base;
5505         unsigned int data_len;
5506         const u32 *data_data;
5507 };
5508
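/* Copy a firmware image into a CPU's scratch memory.  The NVRAM lock is
 * taken first so that boot code still running from NVRAM is not
 * disturbed, the target CPU is halted, the scratch area is cleared, and
 * each section is then written at cpu_scratch_base plus the low 16 bits
 * of its link address.  The CPU is left halted; callers start it by
 * writing CPU_PC and releasing CPU_MODE_HALT.
 */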
5509 /* tp->lock is held. */
5510 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5511                                  int cpu_scratch_size, struct fw_info *info)
5512 {
5513         int err, lock_err, i;
5514         void (*write_op)(struct tg3 *, u32, u32);
5515
5516         if (cpu_base == TX_CPU_BASE &&
5517             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5518                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5519                        "TX CPU firmware on %s which is 5705 or later.\n",
5520                        tp->dev->name);
5521                 return -EINVAL;
5522         }
5523
5524         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5525                 write_op = tg3_write_mem;
5526         else
5527                 write_op = tg3_write_indirect_reg32;
5528
5529         /* It is possible that bootcode is still loading at this point.
5530          * Get the nvram lock first before halting the cpu.
5531          */
5532         lock_err = tg3_nvram_lock(tp);
5533         err = tg3_halt_cpu(tp, cpu_base);
5534         if (!lock_err)
5535                 tg3_nvram_unlock(tp);
5536         if (err)
5537                 goto out;
5538
5539         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5540                 write_op(tp, cpu_scratch_base + i, 0);
5541         tw32(cpu_base + CPU_STATE, 0xffffffff);
5542         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
5543         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5544                 write_op(tp, (cpu_scratch_base +
5545                               (info->text_base & 0xffff) +
5546                               (i * sizeof(u32))),
5547                          (info->text_data ?
5548                           info->text_data[i] : 0));
5549         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5550                 write_op(tp, (cpu_scratch_base +
5551                               (info->rodata_base & 0xffff) +
5552                               (i * sizeof(u32))),
5553                          (info->rodata_data ?
5554                           info->rodata_data[i] : 0));
5555         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5556                 write_op(tp, (cpu_scratch_base +
5557                               (info->data_base & 0xffff) +
5558                               (i * sizeof(u32))),
5559                          (info->data_data ?
5560                           info->data_data[i] : 0));
5561
5562         err = 0;
5563
5564 out:
5565         return err;
5566 }
5567
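/* Load the 5701 A0 workaround firmware into both the RX and TX CPU
 * scratch areas, then start only the RX CPU, retrying the PC write up
 * to five times before giving up.
 */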
5568 /* tp->lock is held. */
5569 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5570 {
5571         struct fw_info info;
5572         int err, i;
5573
5574         info.text_base = TG3_FW_TEXT_ADDR;
5575         info.text_len = TG3_FW_TEXT_LEN;
5576         info.text_data = &tg3FwText[0];
5577         info.rodata_base = TG3_FW_RODATA_ADDR;
5578         info.rodata_len = TG3_FW_RODATA_LEN;
5579         info.rodata_data = &tg3FwRodata[0];
5580         info.data_base = TG3_FW_DATA_ADDR;
5581         info.data_len = TG3_FW_DATA_LEN;
5582         info.data_data = NULL;
5583
5584         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5585                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5586                                     &info);
5587         if (err)
5588                 return err;
5589
5590         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5591                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5592                                     &info);
5593         if (err)
5594                 return err;
5595
5596         /* Now start up only the RX CPU. */
5597         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5598         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5599
5600         for (i = 0; i < 5; i++) {
5601                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5602                         break;
5603                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5604                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5605                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5606                 udelay(1000);
5607         }
5608         if (i >= 5) {
5609                 printk(KERN_ERR PFX "tg3_load_firmware failed to set RX "
5610                        "CPU PC for %s: is %08x, should be %08x\n",
5611                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5612                        TG3_FW_TEXT_ADDR);
5613                 return -ENODEV;
5614         }
5615         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5616         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5617
5618         return 0;
5619 }
5620
5621
5622 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5623 #define TG3_TSO_FW_RELASE_MINOR         0x6
5624 #define TG3_TSO_FW_RELEASE_FIX          0x0
5625 #define TG3_TSO_FW_START_ADDR           0x08000000
5626 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5627 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5628 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5629 #define TG3_TSO_FW_RODATA_LEN           0x60
5630 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5631 #define TG3_TSO_FW_DATA_LEN             0x30
5632 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5633 #define TG3_TSO_FW_SBSS_LEN             0x2c
5634 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5635 #define TG3_TSO_FW_BSS_LEN              0x894
5636
5637 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5638         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5639         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5640         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5641         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5642         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5643         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5644         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5645         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5646         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5647         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5648         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5649         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5650         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5651         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5652         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5653         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5654         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5655         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5656         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5657         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5658         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5659         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5660         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5661         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5662         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5663         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5664         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5665         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5666         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5667         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5668         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5669         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5670         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5671         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5672         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5673         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5674         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5675         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5676         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5677         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5678         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5679         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5680         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5681         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5682         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5683         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5684         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5685         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5686         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5687         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5688         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5689         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5690         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5691         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5692         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5693         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5694         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5695         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5696         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5697         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5698         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5699         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5700         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5701         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5702         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5703         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5704         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5705         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5706         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5707         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5708         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5709         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5710         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5711         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5712         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5713         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5714         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5715         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5716         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5717         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5718         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5719         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5720         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5721         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5722         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5723         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5724         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5725         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5726         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5727         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5728         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5729         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5730         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5731         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5732         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5733         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5734         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5735         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5736         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5737         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5738         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5739         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5740         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5741         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5742         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5743         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5744         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5745         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5746         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5747         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5748         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5749         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5750         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5751         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5752         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5753         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5754         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5755         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5756         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5757         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5758         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5759         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5760         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5761         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5762         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5763         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5764         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5765         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5766         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5767         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5768         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5769         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5770         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5771         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5772         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5773         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5774         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5775         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5776         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5777         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5778         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5779         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5780         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5781         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5782         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5783         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5784         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5785         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5786         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5787         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5788         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5789         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5790         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5791         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5792         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5793         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5794         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5795         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5796         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5797         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5798         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5799         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5800         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5801         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5802         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5803         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5804         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5805         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5806         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5807         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5808         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5809         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5810         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5811         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5812         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5813         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5814         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5815         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5816         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5817         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5818         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5819         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5820         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5821         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5822         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5823         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5824         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5825         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5826         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5827         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5828         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5829         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5830         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5831         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5832         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5833         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5834         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5835         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5836         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5837         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5838         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5839         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5840         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5841         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5842         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5843         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5844         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5845         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5846         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5847         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5848         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5849         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5850         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5851         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5852         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5853         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5854         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5855         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5856         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5857         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5858         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5859         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5860         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5861         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5862         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5863         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5864         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5865         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5866         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5867         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5868         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5869         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5870         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5871         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5872         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5873         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5874         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5875         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5876         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5877         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5878         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5879         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5880         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5881         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5882         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5883         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5884         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5885         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5886         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5887         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5888         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5889         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5890         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5891         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5892         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5893         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5894         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5895         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5896         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5897         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5898         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5899         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5900         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5901         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5902         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5903         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5904         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5905         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5906         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5907         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5908         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5909         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5910         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5911         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5912         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5913         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5914         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5915         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5916         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5917         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5918         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5919         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5920         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5921         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5922 };
5923
5924 static const u32 tg3TsoFwRodata[] = {
5925         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5926         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5927         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5928         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5929         0x00000000,
5930 };
5931
5932 static const u32 tg3TsoFwData[] = {
5933         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5934         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5935         0x00000000,
5936 };
5937
5938 /* 5705 needs a special version of the TSO firmware.  */
5939 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5940 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5941 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5942 #define TG3_TSO5_FW_START_ADDR          0x00010000
5943 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5944 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5945 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5946 #define TG3_TSO5_FW_RODATA_LEN          0x50
5947 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5948 #define TG3_TSO5_FW_DATA_LEN            0x20
5949 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5950 #define TG3_TSO5_FW_SBSS_LEN            0x28
5951 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5952 #define TG3_TSO5_FW_BSS_LEN             0x88
5953
5954 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5955         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5956         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5957         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5958         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5959         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5960         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5961         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5962         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5963         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5964         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5965         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5966         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5967         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5968         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5969         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5970         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5971         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5972         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5973         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5974         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5975         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5976         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5977         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5978         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5979         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5980         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5981         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5982         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5983         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5984         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5985         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5986         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5987         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5988         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5989         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5990         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5991         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5992         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5993         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5994         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5995         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5996         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5997         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5998         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5999         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6000         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6001         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6002         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6003         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6004         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6005         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6006         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6007         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6008         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6009         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6010         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6011         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6012         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6013         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6014         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6015         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6016         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6017         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6018         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6019         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6020         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6021         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6022         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6023         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6024         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6025         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6026         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6027         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6028         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6029         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6030         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6031         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6032         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6033         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6034         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6035         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6036         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6037         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6038         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6039         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6040         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6041         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6042         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6043         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6044         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6045         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6046         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6047         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6048         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6049         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6050         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6051         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6052         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6053         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6054         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6055         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6056         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6057         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6058         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6059         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6060         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6061         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6062         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6063         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6064         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6065         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6066         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6067         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6068         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6069         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6070         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6071         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6072         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6073         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6074         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6075         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6076         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6077         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6078         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6079         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6080         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6081         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6082         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6083         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6084         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6085         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6086         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6087         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6088         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6089         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6090         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6091         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6092         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6093         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6094         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6095         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6096         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6097         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6098         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6099         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6100         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6101         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6102         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6103         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6104         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6105         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6106         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6107         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6108         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6109         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6110         0x00000000, 0x00000000, 0x00000000,
6111 };
6112
6113 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6114         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6115         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6116         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6117         0x00000000, 0x00000000, 0x00000000,
6118 };
6119
6120 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6121         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6122         0x00000000, 0x00000000, 0x00000000,
6123 };
6124
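/* Load the TSO firmware unless the chip does TSO in hardware.  The 5705
 * variant runs on the RX CPU out of the 5705 mbuf pool SRAM; all other
 * chips run the full image on the TX CPU scratch area.  After loading,
 * the CPU's PC is pointed at the firmware text base and the CPU is
 * released from halt.
 */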
6125 /* tp->lock is held. */
6126 static int tg3_load_tso_firmware(struct tg3 *tp)
6127 {
6128         struct fw_info info;
6129         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6130         int err, i;
6131
6132         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6133                 return 0;
6134
6135         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6136                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6137                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6138                 info.text_data = &tg3Tso5FwText[0];
6139                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6140                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6141                 info.rodata_data = &tg3Tso5FwRodata[0];
6142                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6143                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6144                 info.data_data = &tg3Tso5FwData[0];
6145                 cpu_base = RX_CPU_BASE;
6146                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6147                 cpu_scratch_size = (info.text_len +
6148                                     info.rodata_len +
6149                                     info.data_len +
6150                                     TG3_TSO5_FW_SBSS_LEN +
6151                                     TG3_TSO5_FW_BSS_LEN);
6152         } else {
6153                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6154                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6155                 info.text_data = &tg3TsoFwText[0];
6156                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6157                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6158                 info.rodata_data = &tg3TsoFwRodata[0];
6159                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6160                 info.data_len = TG3_TSO_FW_DATA_LEN;
6161                 info.data_data = &tg3TsoFwData[0];
6162                 cpu_base = TX_CPU_BASE;
6163                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6164                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6165         }
6166
6167         err = tg3_load_firmware_cpu(tp, cpu_base,
6168                                     cpu_scratch_base, cpu_scratch_size,
6169                                     &info);
6170         if (err)
6171                 return err;
6172
6173         /* Now start up the CPU. */
6174         tw32(cpu_base + CPU_STATE, 0xffffffff);
6175         tw32_f(cpu_base + CPU_PC,    info.text_base);
6176
6177         for (i = 0; i < 5; i++) {
6178                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6179                         break;
6180                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6181                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6182                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6183                 udelay(1000);
6184         }
6185         if (i >= 5) {
6186                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
6187                        "CPU PC for %s: is %08x, should be %08x\n",
6188                        tp->dev->name, tr32(cpu_base + CPU_PC),
6189                        info.text_base);
6190                 return -ENODEV;
6191         }
6192         tw32(cpu_base + CPU_STATE, 0xffffffff);
6193         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6194         return 0;
6195 }
6196
6197
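/* Program the station address into the four MAC address slots
 * (optionally leaving slot 1 alone when ASF firmware owns it), into the
 * twelve extended slots on 5703/5704, and derive the TX backoff seed
 * from the byte sum of the address.
 */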
6198 /* tp->lock is held. */
6199 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6200 {
6201         u32 addr_high, addr_low;
6202         int i;
6203
6204         addr_high = ((tp->dev->dev_addr[0] << 8) |
6205                      tp->dev->dev_addr[1]);
6206         addr_low = ((tp->dev->dev_addr[2] << 24) |
6207                     (tp->dev->dev_addr[3] << 16) |
6208                     (tp->dev->dev_addr[4] <<  8) |
6209                     (tp->dev->dev_addr[5] <<  0));
6210         for (i = 0; i < 4; i++) {
6211                 if (i == 1 && skip_mac_1)
6212                         continue;
6213                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6214                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6215         }
6216
6217         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6218             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6219                 for (i = 0; i < 12; i++) {
6220                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6221                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6222                 }
6223         }
6224
6225         addr_high = (tp->dev->dev_addr[0] +
6226                      tp->dev->dev_addr[1] +
6227                      tp->dev->dev_addr[2] +
6228                      tp->dev->dev_addr[3] +
6229                      tp->dev->dev_addr[4] +
6230                      tp->dev->dev_addr[5]) &
6231                 TX_BACKOFF_SEED_MASK;
6232         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6233 }
6234
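/* set_mac_address entry point: validate and record the new address,
 * then, if the interface is running, rewrite the MAC address registers
 * under tp->lock.  MAC slot 1 is skipped when ASF has programmed its
 * own address there.
 */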
6235 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6236 {
6237         struct tg3 *tp = netdev_priv(dev);
6238         struct sockaddr *addr = p;
6239         int err = 0, skip_mac_1 = 0;
6240
6241         if (!is_valid_ether_addr(addr->sa_data))
6242                 return -EINVAL;
6243
6244         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6245
6246         if (!netif_running(dev))
6247                 return 0;
6248
6249         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6250                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6251
6252                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6253                 addr0_low = tr32(MAC_ADDR_0_LOW);
6254                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6255                 addr1_low = tr32(MAC_ADDR_1_LOW);
6256
6257                 /* Skip MAC addr 1 if ASF is using it. */
6258                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6259                     !(addr1_high == 0 && addr1_low == 0))
6260                         skip_mac_1 = 1;
6261         }
6262         spin_lock_bh(&tp->lock);
6263         __tg3_set_mac_addr(tp, skip_mac_1);
6264         spin_unlock_bh(&tp->lock);
6265
6266         return err;
6267 }
6268
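/* Write one ring control block into NIC SRAM: the 64-bit host DMA
 * address split into high/low words, the combined max-length/flags
 * word, and, on pre-5705 chips, the NIC-memory address of the ring.
 */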
6269 /* tp->lock is held. */
6270 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6271                            dma_addr_t mapping, u32 maxlen_flags,
6272                            u32 nic_addr)
6273 {
6274         tg3_write_mem(tp,
6275                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6276                       ((u64) mapping >> 32));
6277         tg3_write_mem(tp,
6278                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6279                       ((u64) mapping & 0xffffffff));
6280         tg3_write_mem(tp,
6281                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6282                        maxlen_flags);
6283
6284         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6285                 tg3_write_mem(tp,
6286                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6287                               nic_addr);
6288 }
6289
6290 static void __tg3_set_rx_mode(struct net_device *);
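/* Push ethtool interrupt coalescing parameters into the host coalescing
 * engine.  The per-interrupt tick limits and the statistics block
 * interval only exist on pre-5705 chips, and the statistics interval is
 * forced to zero while the link is down.
 */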
6291 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6292 {
6293         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6294         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6295         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6296         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6297         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6298                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6299                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6300         }
6301         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6302         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6303         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6304                 u32 val = ec->stats_block_coalesce_usecs;
6305
6306                 if (!netif_carrier_ok(tp->dev))
6307                         val = 0;
6308
6309                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6310         }
6311 }
6312
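/* Bring the chip to a fully initialized state: quiesce interrupts and
 * on-chip firmware, optionally reset the PHY, perform a chip reset,
 * reapply chipset-specific workarounds and reinitialize the descriptor
 * rings before reprogramming the rest of the device.
 */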
6313 /* tp->lock is held. */
6314 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6315 {
6316         u32 val, rdmac_mode;
6317         int i, err, limit;
6318
6319         tg3_disable_ints(tp);
6320
6321         tg3_stop_fw(tp);
6322
6323         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6324
6325         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6326                 tg3_abort_hw(tp, 1);
6327         }
6328
6329         if (reset_phy)
6330                 tg3_phy_reset(tp);
6331
6332         err = tg3_chip_reset(tp);
6333         if (err)
6334                 return err;
6335
6336         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6337
6338         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6339                 val = tr32(TG3_CPMU_CTRL);
6340                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6341                 tw32(TG3_CPMU_CTRL, val);
6342         }
6343
6344         /* This works around an issue with Athlon chipsets on
6345          * B3 tigon3 silicon.  This bit has no effect on any
6346          * other revision.  But do not set this on PCI Express
6347          * chips and don't even touch the clocks if the CPMU is present.
6348          */
6349         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6350                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6351                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6352                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6353         }
6354
6355         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6356             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6357                 val = tr32(TG3PCI_PCISTATE);
6358                 val |= PCISTATE_RETRY_SAME_DMA;
6359                 tw32(TG3PCI_PCISTATE, val);
6360         }
6361
6362         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6363                 /* Allow reads and writes to the
6364                  * APE register and memory space.
6365                  */
6366                 val = tr32(TG3PCI_PCISTATE);
6367                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6368                        PCISTATE_ALLOW_APE_SHMEM_WR;
6369                 tw32(TG3PCI_PCISTATE, val);
6370         }
6371
6372         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6373                 /* Enable some hw fixes.  */
6374                 val = tr32(TG3PCI_MSI_DATA);
6375                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6376                 tw32(TG3PCI_MSI_DATA, val);
6377         }
6378
6379         /* Descriptor ring init may make accesses to the
6380          * NIC SRAM area to set up the TX descriptors, so we
6381          * can only do this after the hardware has been
6382          * successfully reset.
6383          */
6384         err = tg3_init_rings(tp);
6385         if (err)
6386                 return err;
6387
6388         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6389             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6390                 /* This value is determined during the probe time DMA
6391                  * engine test, tg3_test_dma.
6392                  */
6393                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6394         }
6395
6396         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6397                           GRC_MODE_4X_NIC_SEND_RINGS |
6398                           GRC_MODE_NO_TX_PHDR_CSUM |
6399                           GRC_MODE_NO_RX_PHDR_CSUM);
6400         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6401
6402         /* Pseudo-header checksum is done by hardware logic and not
6403          * the offload processors, so make the chip do the pseudo-
6404          * header checksums on receive.  For transmit it is more
6405          * convenient to do the pseudo-header checksum in software
6406          * as Linux does that on transmit for us in all cases.
6407          */
6408         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6409
6410         tw32(GRC_MODE,
6411              tp->grc_mode |
6412              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6413
6414         /* Set up the timer prescaler register.  The clock is always 66 MHz; a prescaler value of 65 gives a 1 MHz (1 usec) timer tick. */
6415         val = tr32(GRC_MISC_CFG);
6416         val &= ~0xff;
6417         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6418         tw32(GRC_MISC_CFG, val);
6419
6420         /* Initialize MBUF/DESC pool. */
6421         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6422                 /* Do nothing.  */
6423         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6424                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6425                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6426                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6427                 else
6428                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6429                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6430                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6431         }
6432         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6433                 int fw_len;
6434
6435                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6436                           TG3_TSO5_FW_RODATA_LEN +
6437                           TG3_TSO5_FW_DATA_LEN +
6438                           TG3_TSO5_FW_SBSS_LEN +
6439                           TG3_TSO5_FW_BSS_LEN);
6440                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6441                 tw32(BUFMGR_MB_POOL_ADDR,
6442                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6443                 tw32(BUFMGR_MB_POOL_SIZE,
6444                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6445         }
6446
6447         if (tp->dev->mtu <= ETH_DATA_LEN) {
6448                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6449                      tp->bufmgr_config.mbuf_read_dma_low_water);
6450                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6451                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6452                 tw32(BUFMGR_MB_HIGH_WATER,
6453                      tp->bufmgr_config.mbuf_high_water);
6454         } else {
6455                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6456                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6457                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6458                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6459                 tw32(BUFMGR_MB_HIGH_WATER,
6460                      tp->bufmgr_config.mbuf_high_water_jumbo);
6461         }
6462         tw32(BUFMGR_DMA_LOW_WATER,
6463              tp->bufmgr_config.dma_low_water);
6464         tw32(BUFMGR_DMA_HIGH_WATER,
6465              tp->bufmgr_config.dma_high_water);
6466
6467         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6468         for (i = 0; i < 2000; i++) {
6469                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6470                         break;
6471                 udelay(10);
6472         }
6473         if (i >= 2000) {
6474                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6475                        tp->dev->name);
6476                 return -ENODEV;
6477         }
6478
6479         /* Setup replenish threshold. */
6480         val = tp->rx_pending / 8;
6481         if (val == 0)
6482                 val = 1;
6483         else if (val > tp->rx_std_max_post)
6484                 val = tp->rx_std_max_post;
6485         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6486                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6487                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6488
6489                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6490                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6491         }
6492
6493         tw32(RCVBDI_STD_THRESH, val);
6494
6495         /* Initialize the TG3_BDINFOs at:
6496          *  RCVDBDI_STD_BD:     standard eth size rx ring
6497          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6498          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6499          *
6500          * like so:
6501          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6502          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6503          *                              ring attribute flags
6504          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6505          *
6506          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6507          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6508          *
6509          * The size of each ring is fixed in the firmware, but the location is
6510          * configurable.
6511          */
6512         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6513              ((u64) tp->rx_std_mapping >> 32));
6514         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6515              ((u64) tp->rx_std_mapping & 0xffffffff));
6516         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6517              NIC_SRAM_RX_BUFFER_DESC);
6518
6519         /* Don't even try to program the JUMBO/MINI buffer descriptor
6520          * configs on 5705.
6521          */
6522         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6523                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6524                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6525         } else {
6526                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6527                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6528
6529                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6530                      BDINFO_FLAGS_DISABLED);
6531
6532                 /* Setup replenish threshold. */
6533                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6534
6535                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6536                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6537                              ((u64) tp->rx_jumbo_mapping >> 32));
6538                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6539                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6540                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6541                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6542                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6543                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6544                 } else {
6545                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6546                              BDINFO_FLAGS_DISABLED);
6547                 }
6548
6549         }
6550
6551         /* There is only one send ring on 5705/5750; no need to explicitly
6552          * disable the others.
6553          */
6554         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6555                 /* Clear out send RCB ring in SRAM. */
6556                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6557                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6558                                       BDINFO_FLAGS_DISABLED);
6559         }
6560
6561         tp->tx_prod = 0;
6562         tp->tx_cons = 0;
6563         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6564         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6565
6566         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6567                        tp->tx_desc_mapping,
6568                        (TG3_TX_RING_SIZE <<
6569                         BDINFO_FLAGS_MAXLEN_SHIFT),
6570                        NIC_SRAM_TX_BUFFER_DESC);
6571
6572         /* There is only one receive return ring on 5705/5750; no need
6573          * to explicitly disable the others.
6574          */
6575         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6576                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6577                      i += TG3_BDINFO_SIZE) {
6578                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6579                                       BDINFO_FLAGS_DISABLED);
6580                 }
6581         }
6582
6583         tp->rx_rcb_ptr = 0;
6584         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6585
6586         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6587                        tp->rx_rcb_mapping,
6588                        (TG3_RX_RCB_RING_SIZE(tp) <<
6589                         BDINFO_FLAGS_MAXLEN_SHIFT),
6590                        0);
6591
6592         tp->rx_std_ptr = tp->rx_pending;
6593         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6594                      tp->rx_std_ptr);
6595
6596         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6597                                                 tp->rx_jumbo_pending : 0;
6598         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6599                      tp->rx_jumbo_ptr);
6600
6601         /* Initialize MAC address and backoff seed. */
6602         __tg3_set_mac_addr(tp, 0);
6603
6604         /* MTU + ethernet header (ETH_HLEN) + 4-byte FCS + optional 4-byte VLAN tag */
6605         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6606
6607         /* The slot time is changed by tg3_setup_phy if we
6608          * run at gigabit with half duplex.
6609          */
6610         tw32(MAC_TX_LENGTHS,
6611              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6612              (6 << TX_LENGTHS_IPG_SHIFT) |
6613              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6614
6615         /* Receive rules. */
6616         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6617         tw32(RCVLPC_CONFIG, 0x0181);
6618
6619         /* Calculate the RDMAC_MODE setting early; we need it below to determine
6620          * the RCVLPC_STATS_ENABLE mask.
6621          */
6622         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6623                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6624                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6625                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6626                       RDMAC_MODE_LNGREAD_ENAB);
6627
6628         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6629                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6630                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6631                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6632
6633         /* If statement applies to 5705 and 5750 PCI devices only */
6634         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6635              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6636             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6637                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6638                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6639                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6640                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6641                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6642                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6643                 }
6644         }
6645
6646         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6647                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6648
6649         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6650                 rdmac_mode |= (1 << 27);
6651
6652         /* Receive/send statistics. */
6653         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6654                 val = tr32(RCVLPC_STATS_ENABLE);
6655                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6656                 tw32(RCVLPC_STATS_ENABLE, val);
6657         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6658                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6659                 val = tr32(RCVLPC_STATS_ENABLE);
6660                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6661                 tw32(RCVLPC_STATS_ENABLE, val);
6662         } else {
6663                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6664         }
6665         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6666         tw32(SNDDATAI_STATSENAB, 0xffffff);
6667         tw32(SNDDATAI_STATSCTRL,
6668              (SNDDATAI_SCTRL_ENABLE |
6669               SNDDATAI_SCTRL_FASTUPD));
6670
6671         /* Setup host coalescing engine. */
6672         tw32(HOSTCC_MODE, 0);
6673         for (i = 0; i < 2000; i++) {
6674                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6675                         break;
6676                 udelay(10);
6677         }
6678
6679         __tg3_set_coalesce(tp, &tp->coal);
6680
6681         /* set status block DMA address */
6682         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6683              ((u64) tp->status_mapping >> 32));
6684         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6685              ((u64) tp->status_mapping & 0xffffffff));
6686
6687         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6688                 /* Status/statistics block address.  See tg3_timer,
6689                  * the tg3_periodic_fetch_stats call there, and
6690                  * tg3_get_stats to see how this works for 5705/5750 chips.
6691                  */
6692                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6693                      ((u64) tp->stats_mapping >> 32));
6694                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6695                      ((u64) tp->stats_mapping & 0xffffffff));
6696                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6697                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6698         }
6699
6700         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6701
6702         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6703         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6704         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6705                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6706
6707         /* Clear statistics/status block in chip, and status block in ram. */
6708         for (i = NIC_SRAM_STATS_BLK;
6709              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6710              i += sizeof(u32)) {
6711                 tg3_write_mem(tp, i, 0);
6712                 udelay(40);
6713         }
6714         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6715
6716         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6717                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6718                 /* reset to prevent losing 1st rx packet intermittently */
6719                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6720                 udelay(10);
6721         }
6722
6723         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6724                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6725         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6726             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6727             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6728                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6729         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6730         udelay(40);
6731
6732         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6733          * If TG3_FLG2_IS_NIC is zero, we should read the
6734          * register to preserve the GPIO settings for LOMs. The GPIOs,
6735          * whether used as inputs or outputs, are set by boot code after
6736          * reset.
6737          */
6738         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6739                 u32 gpio_mask;
6740
6741                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6742                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6743                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6744
6745                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6746                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6747                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6748
6749                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6750                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6751
6752                 tp->grc_local_ctrl &= ~gpio_mask;
6753                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6754
6755                 /* GPIO1 must be driven high for eeprom write protect */
6756                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6757                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6758                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6759         }
6760         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6761         udelay(100);
6762
6763         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6764         tp->last_tag = 0;
6765
6766         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6767                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6768                 udelay(40);
6769         }
6770
6771         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6772                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6773                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6774                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6775                WDMAC_MODE_LNGREAD_ENAB);
6776
6777         /* If statement applies to 5705 and 5750 PCI devices only */
6778         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6779              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6780             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6781                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6782                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6783                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6784                         /* nothing */
6785                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6786                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6787                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6788                         val |= WDMAC_MODE_RX_ACCEL;
6789                 }
6790         }
6791
6792         /* Enable host coalescing bug fix */
6793         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6794             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6795             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6796             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6797                 val |= (1 << 29);
6798
6799         tw32_f(WDMAC_MODE, val);
6800         udelay(40);
6801
6802         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6803                 u16 pcix_cmd;
6804
6805                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6806                                      &pcix_cmd);
6807                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6808                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6809                         pcix_cmd |= PCI_X_CMD_READ_2K;
6810                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6811                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6812                         pcix_cmd |= PCI_X_CMD_READ_2K;
6813                 }
6814                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6815                                       pcix_cmd);
6816         }
6817
6818         tw32_f(RDMAC_MODE, rdmac_mode);
6819         udelay(40);
6820
6821         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6822         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6823                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6824
6825         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6826                 tw32(SNDDATAC_MODE,
6827                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6828         else
6829                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6830
6831         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6832         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6833         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6834         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6835         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6836                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6837         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6838         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6839
6840         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6841                 err = tg3_load_5701_a0_firmware_fix(tp);
6842                 if (err)
6843                         return err;
6844         }
6845
6846         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6847                 err = tg3_load_tso_firmware(tp);
6848                 if (err)
6849                         return err;
6850         }
6851
6852         tp->tx_mode = TX_MODE_ENABLE;
6853         tw32_f(MAC_TX_MODE, tp->tx_mode);
6854         udelay(100);
6855
6856         tp->rx_mode = RX_MODE_ENABLE;
6857         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6858             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6859                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6860
6861         tw32_f(MAC_RX_MODE, tp->rx_mode);
6862         udelay(10);
6863
6864         if (tp->link_config.phy_is_low_power) {
6865                 tp->link_config.phy_is_low_power = 0;
6866                 tp->link_config.speed = tp->link_config.orig_speed;
6867                 tp->link_config.duplex = tp->link_config.orig_duplex;
6868                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6869         }
6870
6871         tp->mi_mode = MAC_MI_MODE_BASE;
6872         tw32_f(MAC_MI_MODE, tp->mi_mode);
6873         udelay(80);
6874
6875         tw32(MAC_LED_CTRL, tp->led_ctrl);
6876
6877         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6878         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6879                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6880                 udelay(10);
6881         }
6882         tw32_f(MAC_RX_MODE, tp->rx_mode);
6883         udelay(10);
6884
6885         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6886                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6887                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6888                         /* Set drive transmission level to 1.2V  */
6889                         /* only if the signal pre-emphasis bit is not set  */
6890                         val = tr32(MAC_SERDES_CFG);
6891                         val &= 0xfffff000;
6892                         val |= 0x880;
6893                         tw32(MAC_SERDES_CFG, val);
6894                 }
6895                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6896                         tw32(MAC_SERDES_CFG, 0x616000);
6897         }
6898
6899         /* Prevent chip from dropping frames when flow control
6900          * is enabled.
6901          */
6902         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6903
6904         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6905             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6906                 /* Use hardware link auto-negotiation */
6907                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6908         }
6909
6910         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6911             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6912                 u32 tmp;
6913
6914                 tmp = tr32(SERDES_RX_CTRL);
6915                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6916                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6917                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6918                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6919         }
6920
6921         err = tg3_setup_phy(tp, 0);
6922         if (err)
6923                 return err;
6924
6925         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6926             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6927                 u32 tmp;
6928
6929                 /* Clear CRC stats. */
6930                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6931                         tg3_writephy(tp, MII_TG3_TEST1,
6932                                      tmp | MII_TG3_TEST1_CRC_EN);
6933                         tg3_readphy(tp, 0x14, &tmp);
6934                 }
6935         }
6936
6937         __tg3_set_rx_mode(tp->dev);
6938
6939         /* Initialize receive rules. */
6940         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6941         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6942         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6943         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6944
6945         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6946             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6947                 limit = 8;
6948         else
6949                 limit = 16;
6950         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6951                 limit -= 4;
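        /* The switch falls through on purpose: starting at 'limit', it clears
         * every unused receive rule from limit - 1 down to rule 4.  Rules 0
         * and 1 were programmed above, rules 2 and 3 are deliberately left
         * alone (note the commented-out writes), and when ASF is enabled the
         * top four rules are skipped entirely (presumably reserved for the
         * firmware).
         */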
6952         switch (limit) {
6953         case 16:
6954                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6955         case 15:
6956                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6957         case 14:
6958                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6959         case 13:
6960                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6961         case 12:
6962                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6963         case 11:
6964                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6965         case 10:
6966                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6967         case 9:
6968                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6969         case 8:
6970                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6971         case 7:
6972                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6973         case 6:
6974                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6975         case 5:
6976                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6977         case 4:
6978                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6979         case 3:
6980                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6981         case 2:
6982         case 1:
6983
6984         default:
6985                 break;
6986         }
6987
6988         /* Write our heartbeat update interval to APE. */
6989         tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
6990                         APE_HOST_HEARTBEAT_INT_DISABLE);
6991
6992         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6993
6994         return 0;
6995 }
6996
6997 /* Called at device open time to get the chip ready for
6998  * packet processing.  Invoked with tp->lock held.
6999  */
7000 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7001 {
7002         int err;
7003
7004         /* Force the chip into D0. */
7005         err = tg3_set_power_state(tp, PCI_D0);
7006         if (err)
7007                 goto out;
7008
7009         tg3_switch_clocks(tp);
7010
7011         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7012
7013         err = tg3_reset_hw(tp, reset_phy);
7014
7015 out:
7016         return err;
7017 }
7018
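/* Fold a 32-bit hardware counter register into a 64-bit (high/low) software
 * counter.  The unsigned comparison detects low-word wrap-around: if the new
 * low value is smaller than the value just added, the addition overflowed
 * and the high word must be carried (e.g. 0xfffffff0 + 0x20 wraps to 0x10,
 * which is < 0x20).
 */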
7019 #define TG3_STAT_ADD32(PSTAT, REG) \
7020 do {    u32 __val = tr32(REG); \
7021         (PSTAT)->low += __val; \
7022         if ((PSTAT)->low < __val) \
7023                 (PSTAT)->high += 1; \
7024 } while (0)
7025
7026 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7027 {
7028         struct tg3_hw_stats *sp = tp->hw_stats;
7029
7030         if (!netif_carrier_ok(tp->dev))
7031                 return;
7032
7033         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7034         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7035         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7036         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7037         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7038         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7039         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7040         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7041         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7042         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7043         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7044         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7045         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7046
7047         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7048         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7049         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7050         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7051         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7052         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7053         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7054         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7055         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7056         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7057         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7058         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7059         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7060         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7061
7062         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7063         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7064         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7065 }
7066
7067 static void tg3_timer(unsigned long __opaque)
7068 {
7069         struct tg3 *tp = (struct tg3 *) __opaque;
7070
7071         if (tp->irq_sync)
7072                 goto restart_timer;
7073
7074         spin_lock(&tp->lock);
7075
7076         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7077                 /* All of this is needed because, when using non-tagged
7078                  * IRQ status, the mailbox/status_block protocol the chip
7079                  * uses with the CPU is race prone.
7080                  */
7081                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7082                         tw32(GRC_LOCAL_CTRL,
7083                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7084                 } else {
7085                         tw32(HOSTCC_MODE, tp->coalesce_mode |
7086                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7087                 }
7088
7089                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7090                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7091                         spin_unlock(&tp->lock);
7092                         schedule_work(&tp->reset_task);
7093                         return;
7094                 }
7095         }
7096
7097         /* This part only runs once per second. */
7098         if (!--tp->timer_counter) {
7099                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7100                         tg3_periodic_fetch_stats(tp);
7101
7102                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7103                         u32 mac_stat;
7104                         int phy_event;
7105
7106                         mac_stat = tr32(MAC_STATUS);
7107
7108                         phy_event = 0;
7109                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7110                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7111                                         phy_event = 1;
7112                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7113                                 phy_event = 1;
7114
7115                         if (phy_event)
7116                                 tg3_setup_phy(tp, 0);
7117                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7118                         u32 mac_stat = tr32(MAC_STATUS);
7119                         int need_setup = 0;
7120
7121                         if (netif_carrier_ok(tp->dev) &&
7122                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7123                                 need_setup = 1;
7124                         }
7125                         if (!netif_carrier_ok(tp->dev) &&
7126                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
7127                                          MAC_STATUS_SIGNAL_DET))) {
7128                                 need_setup = 1;
7129                         }
7130                         if (need_setup) {
7131                                 if (!tp->serdes_counter) {
7132                                         tw32_f(MAC_MODE,
7133                                              (tp->mac_mode &
7134                                               ~MAC_MODE_PORT_MODE_MASK));
7135                                         udelay(40);
7136                                         tw32_f(MAC_MODE, tp->mac_mode);
7137                                         udelay(40);
7138                                 }
7139                                 tg3_setup_phy(tp, 0);
7140                         }
7141                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7142                         tg3_serdes_parallel_detect(tp);
7143
7144                 tp->timer_counter = tp->timer_multiplier;
7145         }
7146
7147         /* Heartbeat is only sent once every 2 seconds.
7148          *
7149          * The heartbeat is to tell the ASF firmware that the host
7150          * driver is still alive.  In the event that the OS crashes,
7151          * ASF needs to reset the hardware to free up the FIFO space
7152          * that may be filled with rx packets destined for the host.
7153          * If the FIFO is full, ASF will no longer function properly.
7154          *
7155          * Unintended resets have been reported on real-time kernels
7156          * where the timer doesn't run on time.  Netpoll has the
7157          * same problem.
7158          *
7159          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7160          * to check the ring condition when the heartbeat is expiring
7161          * before doing the reset.  This will prevent most unintended
7162          * resets.
7163          */
7164         if (!--tp->asf_counter) {
7165                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7166                         u32 val;
7167
7168                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7169                                       FWCMD_NICDRV_ALIVE3);
7170                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7171                         /* 5 seconds timeout */
7172                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7173                         val = tr32(GRC_RX_CPU_EVENT);
7174                         val |= (1 << 14);
7175                         tw32(GRC_RX_CPU_EVENT, val);
7176                 }
7177                 tp->asf_counter = tp->asf_multiplier;
7178         }
7179
7180         spin_unlock(&tp->lock);
7181
7182 restart_timer:
7183         tp->timer.expires = jiffies + tp->timer_offset;
7184         add_timer(&tp->timer);
7185 }
7186
7187 static int tg3_request_irq(struct tg3 *tp)
7188 {
7189         irq_handler_t fn;
7190         unsigned long flags;
7191         struct net_device *dev = tp->dev;
7192
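        /* Pick the ISR variant that matches how the device was brought up:
         * one-shot MSI, plain MSI, tagged-status INTx, or legacy INTx.  MSI
         * vectors are never shared, so IRQF_SHARED is only requested for the
         * INTx cases.
         */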
7193         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7194                 fn = tg3_msi;
7195                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7196                         fn = tg3_msi_1shot;
7197                 flags = IRQF_SAMPLE_RANDOM;
7198         } else {
7199                 fn = tg3_interrupt;
7200                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7201                         fn = tg3_interrupt_tagged;
7202                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7203         }
7204         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
7205 }
7206
7207 static int tg3_test_interrupt(struct tg3 *tp)
7208 {
7209         struct net_device *dev = tp->dev;
7210         int err, i, intr_ok = 0;
7211
7212         if (!netif_running(dev))
7213                 return -ENODEV;
7214
7215         tg3_disable_ints(tp);
7216
7217         free_irq(tp->pdev->irq, dev);
7218
7219         err = request_irq(tp->pdev->irq, tg3_test_isr,
7220                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7221         if (err)
7222                 return err;
7223
7224         tp->hw_status->status &= ~SD_STATUS_UPDATED;
7225         tg3_enable_ints(tp);
7226
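        /* Force the coalescing engine to generate an interrupt immediately
         * (HOSTCC_MODE_NOW) so the test ISR has something to catch.
         */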
7227         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7228                HOSTCC_MODE_NOW);
7229
7230         for (i = 0; i < 5; i++) {
7231                 u32 int_mbox, misc_host_ctrl;
7232
7233                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7234                                         TG3_64BIT_REG_LOW);
7235                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7236
7237                 if ((int_mbox != 0) ||
7238                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7239                         intr_ok = 1;
7240                         break;
7241                 }
7242
7243                 msleep(10);
7244         }
7245
7246         tg3_disable_ints(tp);
7247
7248         free_irq(tp->pdev->irq, dev);
7249
7250         err = tg3_request_irq(tp);
7251
7252         if (err)
7253                 return err;
7254
7255         if (intr_ok)
7256                 return 0;
7257
7258         return -EIO;
7259 }
7260
7261 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
7262  * mode is successfully restored.
7263  */
7264 static int tg3_test_msi(struct tg3 *tp)
7265 {
7266         struct net_device *dev = tp->dev;
7267         int err;
7268         u16 pci_cmd;
7269
7270         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7271                 return 0;
7272
7273         /* Turn off SERR reporting in case MSI terminates with Master
7274          * Abort.
7275          */
7276         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7277         pci_write_config_word(tp->pdev, PCI_COMMAND,
7278                               pci_cmd & ~PCI_COMMAND_SERR);
7279
7280         err = tg3_test_interrupt(tp);
7281
7282         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7283
7284         if (!err)
7285                 return 0;
7286
7287         /* other failures */
7288         if (err != -EIO)
7289                 return err;
7290
7291         /* MSI test failed, go back to INTx mode */
7292         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7293                "switching to INTx mode. Please report this failure to "
7294                "the PCI maintainer and include system chipset information.\n",
7295                        tp->dev->name);
7296
7297         free_irq(tp->pdev->irq, dev);
7298         pci_disable_msi(tp->pdev);
7299
7300         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7301
7302         err = tg3_request_irq(tp);
7303         if (err)
7304                 return err;
7305
7306         /* Need to reset the chip because the MSI cycle may have terminated
7307          * with Master Abort.
7308          */
7309         tg3_full_lock(tp, 1);
7310
7311         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7312         err = tg3_init_hw(tp, 1);
7313
7314         tg3_full_unlock(tp);
7315
7316         if (err)
7317                 free_irq(tp->pdev->irq, dev);
7318
7319         return err;
7320 }
7321
7322 static int tg3_open(struct net_device *dev)
7323 {
7324         struct tg3 *tp = netdev_priv(dev);
7325         int err;
7326
7327         netif_carrier_off(tp->dev);
7328
7329         tg3_full_lock(tp, 0);
7330
7331         err = tg3_set_power_state(tp, PCI_D0);
7332         if (err) {
7333                 tg3_full_unlock(tp);
7334                 return err;
7335         }
7336
7337         tg3_disable_ints(tp);
7338         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7339
7340         tg3_full_unlock(tp);
7341
7342         /* The placement of this call is tied
7343          * to the setup and use of Host TX descriptors.
7344          */
7345         err = tg3_alloc_consistent(tp);
7346         if (err)
7347                 return err;
7348
7349         if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7350                 /* All MSI supporting chips should support tagged
7351                  * status.  Assert that this is the case.
7352                  */
7353                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7354                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7355                                "Not using MSI.\n", tp->dev->name);
7356                 } else if (pci_enable_msi(tp->pdev) == 0) {
7357                         u32 msi_mode;
7358
7359                         /* Hardware bug - MSI won't work if INTX disabled. */
7360                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7361                                 pci_intx(tp->pdev, 1);
7362
7363                         msi_mode = tr32(MSGINT_MODE);
7364                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7365                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7366                 }
7367         }
7368         err = tg3_request_irq(tp);
7369
7370         if (err) {
7371                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7372                         pci_disable_msi(tp->pdev);
7373                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7374                 }
7375                 tg3_free_consistent(tp);
7376                 return err;
7377         }
7378
7379         napi_enable(&tp->napi);
7380
7381         tg3_full_lock(tp, 0);
7382
7383         err = tg3_init_hw(tp, 1);
7384         if (err) {
7385                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7386                 tg3_free_rings(tp);
7387         } else {
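                /* With tagged status the periodic timer only needs to run once
                 * per second; otherwise it runs at 10 Hz.  timer_multiplier and
                 * asf_multiplier rescale that so tg3_timer still does its
                 * once-per-second work and sends the ASF heartbeat every two
                 * seconds.
                 */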
7388                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7389                         tp->timer_offset = HZ;
7390                 else
7391                         tp->timer_offset = HZ / 10;
7392
7393                 BUG_ON(tp->timer_offset > HZ);
7394                 tp->timer_counter = tp->timer_multiplier =
7395                         (HZ / tp->timer_offset);
7396                 tp->asf_counter = tp->asf_multiplier =
7397                         ((HZ / tp->timer_offset) * 2);
7398
7399                 init_timer(&tp->timer);
7400                 tp->timer.expires = jiffies + tp->timer_offset;
7401                 tp->timer.data = (unsigned long) tp;
7402                 tp->timer.function = tg3_timer;
7403         }
7404
7405         tg3_full_unlock(tp);
7406
7407         if (err) {
7408                 napi_disable(&tp->napi);
7409                 free_irq(tp->pdev->irq, dev);
7410                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7411                         pci_disable_msi(tp->pdev);
7412                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7413                 }
7414                 tg3_free_consistent(tp);
7415                 return err;
7416         }
7417
7418         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7419                 err = tg3_test_msi(tp);
7420
7421                 if (err) {
7422                         tg3_full_lock(tp, 0);
7423
7424                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7425                                 pci_disable_msi(tp->pdev);
7426                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7427                         }
7428                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7429                         tg3_free_rings(tp);
7430                         tg3_free_consistent(tp);
7431
7432                         tg3_full_unlock(tp);
7433
7434                         napi_disable(&tp->napi);
7435
7436                         return err;
7437                 }
7438
7439                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7440                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7441                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
7442
7443                                 tw32(PCIE_TRANSACTION_CFG,
7444                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
7445                         }
7446                 }
7447         }
7448
7449         tg3_full_lock(tp, 0);
7450
7451         add_timer(&tp->timer);
7452         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7453         tg3_enable_ints(tp);
7454
7455         tg3_full_unlock(tp);
7456
7457         netif_start_queue(dev);
7458
7459         return 0;
7460 }
7461
7462 #if 0
7463 /*static*/ void tg3_dump_state(struct tg3 *tp)
7464 {
7465         u32 val32, val32_2, val32_3, val32_4, val32_5;
7466         u16 val16;
7467         int i;
7468
7469         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7470         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7471         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7472                val16, val32);
7473
7474         /* MAC block */
7475         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7476                tr32(MAC_MODE), tr32(MAC_STATUS));
7477         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7478                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7479         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7480                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7481         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7482                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7483
7484         /* Send data initiator control block */
7485         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7486                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7487         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7488                tr32(SNDDATAI_STATSCTRL));
7489
7490         /* Send data completion control block */
7491         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7492
7493         /* Send BD ring selector block */
7494         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7495                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7496
7497         /* Send BD initiator control block */
7498         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7499                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7500
7501         /* Send BD completion control block */
7502         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7503
7504         /* Receive list placement control block */
7505         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7506                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7507         printk("       RCVLPC_STATSCTRL[%08x]\n",
7508                tr32(RCVLPC_STATSCTRL));
7509
7510         /* Receive data and receive BD initiator control block */
7511         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7512                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7513
7514         /* Receive data completion control block */
7515         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7516                tr32(RCVDCC_MODE));
7517
7518         /* Receive BD initiator control block */
7519         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7520                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7521
7522         /* Receive BD completion control block */
7523         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7524                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7525
7526         /* Receive list selector control block */
7527         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7528                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7529
7530         /* Mbuf cluster free block */
7531         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7532                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7533
7534         /* Host coalescing control block */
7535         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7536                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7537         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7538                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7539                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7540         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7541                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7542                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7543         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7544                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7545         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7546                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7547
7548         /* Memory arbiter control block */
7549         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7550                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7551
7552         /* Buffer manager control block */
7553         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7554                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7555         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7556                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7557         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7558                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7559                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7560                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7561
7562         /* Read DMA control block */
7563         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7564                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7565
7566         /* Write DMA control block */
7567         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7568                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7569
7570         /* DMA completion block */
7571         printk("DEBUG: DMAC_MODE[%08x]\n",
7572                tr32(DMAC_MODE));
7573
7574         /* GRC block */
7575         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7576                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7577         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7578                tr32(GRC_LOCAL_CTRL));
7579
7580         /* TG3_BDINFOs */
7581         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7582                tr32(RCVDBDI_JUMBO_BD + 0x0),
7583                tr32(RCVDBDI_JUMBO_BD + 0x4),
7584                tr32(RCVDBDI_JUMBO_BD + 0x8),
7585                tr32(RCVDBDI_JUMBO_BD + 0xc));
7586         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7587                tr32(RCVDBDI_STD_BD + 0x0),
7588                tr32(RCVDBDI_STD_BD + 0x4),
7589                tr32(RCVDBDI_STD_BD + 0x8),
7590                tr32(RCVDBDI_STD_BD + 0xc));
7591         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7592                tr32(RCVDBDI_MINI_BD + 0x0),
7593                tr32(RCVDBDI_MINI_BD + 0x4),
7594                tr32(RCVDBDI_MINI_BD + 0x8),
7595                tr32(RCVDBDI_MINI_BD + 0xc));
7596
7597         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7598         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7599         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7600         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7601         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7602                val32, val32_2, val32_3, val32_4);
7603
7604         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7605         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7606         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7607         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7608         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7609                val32, val32_2, val32_3, val32_4);
7610
7611         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7612         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7613         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7614         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7615         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7616         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7617                val32, val32_2, val32_3, val32_4, val32_5);
7618
7619         /* SW status block */
7620         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7621                tp->hw_status->status,
7622                tp->hw_status->status_tag,
7623                tp->hw_status->rx_jumbo_consumer,
7624                tp->hw_status->rx_consumer,
7625                tp->hw_status->rx_mini_consumer,
7626                tp->hw_status->idx[0].rx_producer,
7627                tp->hw_status->idx[0].tx_consumer);
7628
7629         /* SW statistics block */
7630         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7631                ((u32 *)tp->hw_stats)[0],
7632                ((u32 *)tp->hw_stats)[1],
7633                ((u32 *)tp->hw_stats)[2],
7634                ((u32 *)tp->hw_stats)[3]);
7635
7636         /* Mailboxes */
7637         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7638                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7639                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7640                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7641                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7642
7643         /* NIC side send descriptors. */
7644         for (i = 0; i < 6; i++) {
7645                 unsigned long txd;
7646
7647                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7648                         + (i * sizeof(struct tg3_tx_buffer_desc));
7649                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7650                        i,
7651                        readl(txd + 0x0), readl(txd + 0x4),
7652                        readl(txd + 0x8), readl(txd + 0xc));
7653         }
7654
7655         /* NIC side RX descriptors. */
7656         for (i = 0; i < 6; i++) {
7657                 unsigned long rxd;
7658
7659                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7660                         + (i * sizeof(struct tg3_rx_buffer_desc));
7661                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7662                        i,
7663                        readl(rxd + 0x0), readl(rxd + 0x4),
7664                        readl(rxd + 0x8), readl(rxd + 0xc));
7665                 rxd += (4 * sizeof(u32));
7666                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7667                        i,
7668                        readl(rxd + 0x0), readl(rxd + 0x4),
7669                        readl(rxd + 0x8), readl(rxd + 0xc));
7670         }
7671
7672         for (i = 0; i < 6; i++) {
7673                 unsigned long rxd;
7674
7675                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7676                         + (i * sizeof(struct tg3_rx_buffer_desc));
7677                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7678                        i,
7679                        readl(rxd + 0x0), readl(rxd + 0x4),
7680                        readl(rxd + 0x8), readl(rxd + 0xc));
7681                 rxd += (4 * sizeof(u32));
7682                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7683                        i,
7684                        readl(rxd + 0x0), readl(rxd + 0x4),
7685                        readl(rxd + 0x8), readl(rxd + 0xc));
7686         }
7687 }
7688 #endif
7689
7690 static struct net_device_stats *tg3_get_stats(struct net_device *);
7691 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7692
7693 static int tg3_close(struct net_device *dev)
7694 {
7695         struct tg3 *tp = netdev_priv(dev);
7696
7697         napi_disable(&tp->napi);
7698         cancel_work_sync(&tp->reset_task);
7699
7700         netif_stop_queue(dev);
7701
7702         del_timer_sync(&tp->timer);
7703
7704         tg3_full_lock(tp, 1);
7705 #if 0
7706         tg3_dump_state(tp);
7707 #endif
7708
7709         tg3_disable_ints(tp);
7710
7711         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7712         tg3_free_rings(tp);
7713         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7714
7715         tg3_full_unlock(tp);
7716
7717         free_irq(tp->pdev->irq, dev);
7718         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7719                 pci_disable_msi(tp->pdev);
7720                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7721         }
7722
7723         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7724                sizeof(tp->net_stats_prev));
7725         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7726                sizeof(tp->estats_prev));
7727
7728         tg3_free_consistent(tp);
7729
7730         tg3_set_power_state(tp, PCI_D3hot);
7731
7732         netif_carrier_off(tp->dev);
7733
7734         return 0;
7735 }
7736
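     /* The chip keeps each MAC statistic as a 64-bit high/low pair.  On 32-bit
      * hosts only the low word is used; on 64-bit hosts both halves are combined.
      */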
7737 static inline unsigned long get_stat64(tg3_stat64_t *val)
7738 {
7739         unsigned long ret;
7740
7741 #if (BITS_PER_LONG == 32)
7742         ret = val->low;
7743 #else
7744         ret = ((u64)val->high << 32) | ((u64)val->low);
7745 #endif
7746         return ret;
7747 }
7748
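     /* On 5700/5701 copper devices the FCS/CRC error count is maintained by the
      * PHY rather than the MAC statistics block: enable the counter via
      * MII_TG3_TEST1 and accumulate the value read from PHY register 0x14.
      * All other configurations use the rx_fcs_errors hardware counter.
      */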
7749 static unsigned long calc_crc_errors(struct tg3 *tp)
7750 {
7751         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7752
7753         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7754             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7755              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7756                 u32 val;
7757
7758                 spin_lock_bh(&tp->lock);
7759                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
7760                         tg3_writephy(tp, MII_TG3_TEST1,
7761                                      val | MII_TG3_TEST1_CRC_EN);
7762                         tg3_readphy(tp, 0x14, &val);
7763                 } else
7764                         val = 0;
7765                 spin_unlock_bh(&tp->lock);
7766
7767                 tp->phy_crc_errors += val;
7768
7769                 return tp->phy_crc_errors;
7770         }
7771
7772         return get_stat64(&hw_stats->rx_fcs_errors);
7773 }
7774
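     /* Report each ethtool statistic as the total saved at the last close
      * (estats_prev) plus the hardware counter accumulated since then.
      */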
7775 #define ESTAT_ADD(member) \
7776         estats->member =        old_estats->member + \
7777                                 get_stat64(&hw_stats->member)
7778
7779 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7780 {
7781         struct tg3_ethtool_stats *estats = &tp->estats;
7782         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7783         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7784
7785         if (!hw_stats)
7786                 return old_estats;
7787
7788         ESTAT_ADD(rx_octets);
7789         ESTAT_ADD(rx_fragments);
7790         ESTAT_ADD(rx_ucast_packets);
7791         ESTAT_ADD(rx_mcast_packets);
7792         ESTAT_ADD(rx_bcast_packets);
7793         ESTAT_ADD(rx_fcs_errors);
7794         ESTAT_ADD(rx_align_errors);
7795         ESTAT_ADD(rx_xon_pause_rcvd);
7796         ESTAT_ADD(rx_xoff_pause_rcvd);
7797         ESTAT_ADD(rx_mac_ctrl_rcvd);
7798         ESTAT_ADD(rx_xoff_entered);
7799         ESTAT_ADD(rx_frame_too_long_errors);
7800         ESTAT_ADD(rx_jabbers);
7801         ESTAT_ADD(rx_undersize_packets);
7802         ESTAT_ADD(rx_in_length_errors);
7803         ESTAT_ADD(rx_out_length_errors);
7804         ESTAT_ADD(rx_64_or_less_octet_packets);
7805         ESTAT_ADD(rx_65_to_127_octet_packets);
7806         ESTAT_ADD(rx_128_to_255_octet_packets);
7807         ESTAT_ADD(rx_256_to_511_octet_packets);
7808         ESTAT_ADD(rx_512_to_1023_octet_packets);
7809         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7810         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7811         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7812         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7813         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7814
7815         ESTAT_ADD(tx_octets);
7816         ESTAT_ADD(tx_collisions);
7817         ESTAT_ADD(tx_xon_sent);
7818         ESTAT_ADD(tx_xoff_sent);
7819         ESTAT_ADD(tx_flow_control);
7820         ESTAT_ADD(tx_mac_errors);
7821         ESTAT_ADD(tx_single_collisions);
7822         ESTAT_ADD(tx_mult_collisions);
7823         ESTAT_ADD(tx_deferred);
7824         ESTAT_ADD(tx_excessive_collisions);
7825         ESTAT_ADD(tx_late_collisions);
7826         ESTAT_ADD(tx_collide_2times);
7827         ESTAT_ADD(tx_collide_3times);
7828         ESTAT_ADD(tx_collide_4times);
7829         ESTAT_ADD(tx_collide_5times);
7830         ESTAT_ADD(tx_collide_6times);
7831         ESTAT_ADD(tx_collide_7times);
7832         ESTAT_ADD(tx_collide_8times);
7833         ESTAT_ADD(tx_collide_9times);
7834         ESTAT_ADD(tx_collide_10times);
7835         ESTAT_ADD(tx_collide_11times);
7836         ESTAT_ADD(tx_collide_12times);
7837         ESTAT_ADD(tx_collide_13times);
7838         ESTAT_ADD(tx_collide_14times);
7839         ESTAT_ADD(tx_collide_15times);
7840         ESTAT_ADD(tx_ucast_packets);
7841         ESTAT_ADD(tx_mcast_packets);
7842         ESTAT_ADD(tx_bcast_packets);
7843         ESTAT_ADD(tx_carrier_sense_errors);
7844         ESTAT_ADD(tx_discards);
7845         ESTAT_ADD(tx_errors);
7846
7847         ESTAT_ADD(dma_writeq_full);
7848         ESTAT_ADD(dma_write_prioq_full);
7849         ESTAT_ADD(rxbds_empty);
7850         ESTAT_ADD(rx_discards);
7851         ESTAT_ADD(rx_errors);
7852         ESTAT_ADD(rx_threshold_hit);
7853
7854         ESTAT_ADD(dma_readq_full);
7855         ESTAT_ADD(dma_read_prioq_full);
7856         ESTAT_ADD(tx_comp_queue_full);
7857
7858         ESTAT_ADD(ring_set_send_prod_index);
7859         ESTAT_ADD(ring_status_update);
7860         ESTAT_ADD(nic_irqs);
7861         ESTAT_ADD(nic_avoided_irqs);
7862         ESTAT_ADD(nic_tx_threshold_hit);
7863
7864         return estats;
7865 }
7866
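     /* Same scheme as tg3_get_estats(): fold the live hardware counters into
      * the totals saved in net_stats_prev when the device was last closed.
      */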
7867 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7868 {
7869         struct tg3 *tp = netdev_priv(dev);
7870         struct net_device_stats *stats = &tp->net_stats;
7871         struct net_device_stats *old_stats = &tp->net_stats_prev;
7872         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7873
7874         if (!hw_stats)
7875                 return old_stats;
7876
7877         stats->rx_packets = old_stats->rx_packets +
7878                 get_stat64(&hw_stats->rx_ucast_packets) +
7879                 get_stat64(&hw_stats->rx_mcast_packets) +
7880                 get_stat64(&hw_stats->rx_bcast_packets);
7881
7882         stats->tx_packets = old_stats->tx_packets +
7883                 get_stat64(&hw_stats->tx_ucast_packets) +
7884                 get_stat64(&hw_stats->tx_mcast_packets) +
7885                 get_stat64(&hw_stats->tx_bcast_packets);
7886
7887         stats->rx_bytes = old_stats->rx_bytes +
7888                 get_stat64(&hw_stats->rx_octets);
7889         stats->tx_bytes = old_stats->tx_bytes +
7890                 get_stat64(&hw_stats->tx_octets);
7891
7892         stats->rx_errors = old_stats->rx_errors +
7893                 get_stat64(&hw_stats->rx_errors);
7894         stats->tx_errors = old_stats->tx_errors +
7895                 get_stat64(&hw_stats->tx_errors) +
7896                 get_stat64(&hw_stats->tx_mac_errors) +
7897                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7898                 get_stat64(&hw_stats->tx_discards);
7899
7900         stats->multicast = old_stats->multicast +
7901                 get_stat64(&hw_stats->rx_mcast_packets);
7902         stats->collisions = old_stats->collisions +
7903                 get_stat64(&hw_stats->tx_collisions);
7904
7905         stats->rx_length_errors = old_stats->rx_length_errors +
7906                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7907                 get_stat64(&hw_stats->rx_undersize_packets);
7908
7909         stats->rx_over_errors = old_stats->rx_over_errors +
7910                 get_stat64(&hw_stats->rxbds_empty);
7911         stats->rx_frame_errors = old_stats->rx_frame_errors +
7912                 get_stat64(&hw_stats->rx_align_errors);
7913         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7914                 get_stat64(&hw_stats->tx_discards);
7915         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7916                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7917
7918         stats->rx_crc_errors = old_stats->rx_crc_errors +
7919                 calc_crc_errors(tp);
7920
7921         stats->rx_missed_errors = old_stats->rx_missed_errors +
7922                 get_stat64(&hw_stats->rx_discards);
7923
7924         return stats;
7925 }
7926
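     /* Bit-reversed CRC-32 (Ethernet polynomial, reflected form 0xedb88320),
      * computed a byte at a time.  Used for the multicast hash filter below
      * and for the NVRAM checksum test.
      */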
7927 static inline u32 calc_crc(unsigned char *buf, int len)
7928 {
7929         u32 reg;
7930         u32 tmp;
7931         int j, k;
7932
7933         reg = 0xffffffff;
7934
7935         for (j = 0; j < len; j++) {
7936                 reg ^= buf[j];
7937
7938                 for (k = 0; k < 8; k++) {
7939                         tmp = reg & 0x01;
7940
7941                         reg >>= 1;
7942
7943                         if (tmp) {
7944                                 reg ^= 0xedb88320;
7945                         }
7946                 }
7947         }
7948
7949         return ~reg;
7950 }
7951
7952 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7953 {
7954         /* accept or reject all multicast frames */
7955         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7956         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7957         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7958         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7959 }
7960
7961 static void __tg3_set_rx_mode(struct net_device *dev)
7962 {
7963         struct tg3 *tp = netdev_priv(dev);
7964         u32 rx_mode;
7965
7966         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7967                                   RX_MODE_KEEP_VLAN_TAG);
7968
7969         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7970          * flag clear.
7971          */
7972 #if TG3_VLAN_TAG_USED
7973         if (!tp->vlgrp &&
7974             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7975                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7976 #else
7977         /* By definition, VLAN is always disabled in this
7978          * case.
7979          */
7980         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7981                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7982 #endif
7983
7984         if (dev->flags & IFF_PROMISC) {
7985                 /* Promiscuous mode. */
7986                 rx_mode |= RX_MODE_PROMISC;
7987         } else if (dev->flags & IFF_ALLMULTI) {
7988                 /* Accept all multicast. */
7989                 tg3_set_multi(tp, 1);
7990         } else if (dev->mc_count < 1) {
7991                 /* Reject all multicast. */
7992                 tg3_set_multi(tp, 0);
7993         } else {
7994                 /* Accept one or more multicast(s). */
7995                 struct dev_mc_list *mclist;
7996                 unsigned int i;
7997                 u32 mc_filter[4] = { 0, };
7998                 u32 regidx;
7999                 u32 bit;
8000                 u32 crc;
8001
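                     /* Hash each address: the low 7 bits of the inverted CRC
                      * select one of 128 filter bits, split into a register
                      * index (bits 6:5) and a bit position (bits 4:0).
                      */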
8002                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8003                      i++, mclist = mclist->next) {
8004
8005                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8006                         bit = ~crc & 0x7f;
8007                         regidx = (bit & 0x60) >> 5;
8008                         bit &= 0x1f;
8009                         mc_filter[regidx] |= (1 << bit);
8010                 }
8011
8012                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8013                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8014                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8015                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8016         }
8017
8018         if (rx_mode != tp->rx_mode) {
8019                 tp->rx_mode = rx_mode;
8020                 tw32_f(MAC_RX_MODE, rx_mode);
8021                 udelay(10);
8022         }
8023 }
8024
8025 static void tg3_set_rx_mode(struct net_device *dev)
8026 {
8027         struct tg3 *tp = netdev_priv(dev);
8028
8029         if (!netif_running(dev))
8030                 return;
8031
8032         tg3_full_lock(tp, 0);
8033         __tg3_set_rx_mode(dev);
8034         tg3_full_unlock(tp);
8035 }
8036
8037 #define TG3_REGDUMP_LEN         (32 * 1024)
8038
8039 static int tg3_get_regs_len(struct net_device *dev)
8040 {
8041         return TG3_REGDUMP_LEN;
8042 }
8043
8044 static void tg3_get_regs(struct net_device *dev,
8045                 struct ethtool_regs *regs, void *_p)
8046 {
8047         u32 *p = _p;
8048         struct tg3 *tp = netdev_priv(dev);
8049         u8 *orig_p = _p;
8050         int i;
8051
8052         regs->version = 0;
8053
8054         memset(p, 0, TG3_REGDUMP_LEN);
8055
8056         if (tp->link_config.phy_is_low_power)
8057                 return;
8058
8059         tg3_full_lock(tp, 0);
8060
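             /* Each register value is stored at its own offset within the 32 KB
              * dump buffer, so ranges that are not read remain zero-filled from
              * the memset() above.
              */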
8061 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
8062 #define GET_REG32_LOOP(base,len)                \
8063 do {    p = (u32 *)(orig_p + (base));           \
8064         for (i = 0; i < len; i += 4)            \
8065                 __GET_REG32((base) + i);        \
8066 } while (0)
8067 #define GET_REG32_1(reg)                        \
8068 do {    p = (u32 *)(orig_p + (reg));            \
8069         __GET_REG32((reg));                     \
8070 } while (0)
8071
8072         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8073         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8074         GET_REG32_LOOP(MAC_MODE, 0x4f0);
8075         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8076         GET_REG32_1(SNDDATAC_MODE);
8077         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8078         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8079         GET_REG32_1(SNDBDC_MODE);
8080         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8081         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8082         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8083         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8084         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8085         GET_REG32_1(RCVDCC_MODE);
8086         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8087         GET_REG32_LOOP(RCVCC_MODE, 0x14);
8088         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8089         GET_REG32_1(MBFREE_MODE);
8090         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8091         GET_REG32_LOOP(MEMARB_MODE, 0x10);
8092         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8093         GET_REG32_LOOP(RDMAC_MODE, 0x08);
8094         GET_REG32_LOOP(WDMAC_MODE, 0x08);
8095         GET_REG32_1(RX_CPU_MODE);
8096         GET_REG32_1(RX_CPU_STATE);
8097         GET_REG32_1(RX_CPU_PGMCTR);
8098         GET_REG32_1(RX_CPU_HWBKPT);
8099         GET_REG32_1(TX_CPU_MODE);
8100         GET_REG32_1(TX_CPU_STATE);
8101         GET_REG32_1(TX_CPU_PGMCTR);
8102         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8103         GET_REG32_LOOP(FTQ_RESET, 0x120);
8104         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8105         GET_REG32_1(DMAC_MODE);
8106         GET_REG32_LOOP(GRC_MODE, 0x4c);
8107         if (tp->tg3_flags & TG3_FLAG_NVRAM)
8108                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8109
8110 #undef __GET_REG32
8111 #undef GET_REG32_LOOP
8112 #undef GET_REG32_1
8113
8114         tg3_full_unlock(tp);
8115 }
8116
8117 static int tg3_get_eeprom_len(struct net_device *dev)
8118 {
8119         struct tg3 *tp = netdev_priv(dev);
8120
8121         return tp->nvram_size;
8122 }
8123
8124 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8125 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8126
8127 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8128 {
8129         struct tg3 *tp = netdev_priv(dev);
8130         int ret;
8131         u8  *pd;
8132         u32 i, offset, len, val, b_offset, b_count;
8133
8134         if (tp->link_config.phy_is_low_power)
8135                 return -EAGAIN;
8136
8137         offset = eeprom->offset;
8138         len = eeprom->len;
8139         eeprom->len = 0;
8140
8141         eeprom->magic = TG3_EEPROM_MAGIC;
8142
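             /* NVRAM reads are 32 bits wide and 4-byte aligned, so the request
              * is serviced in up to three parts: an unaligned head, a run of
              * whole words, and an unaligned tail.
              */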
8143         if (offset & 3) {
8144                 /* adjustments to start on required 4 byte boundary */
8145                 b_offset = offset & 3;
8146                 b_count = 4 - b_offset;
8147                 if (b_count > len) {
8148                         /* i.e. offset=1 len=2 */
8149                         b_count = len;
8150                 }
8151                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
8152                 if (ret)
8153                         return ret;
8154                 val = cpu_to_le32(val);
8155                 memcpy(data, ((char*)&val) + b_offset, b_count);
8156                 len -= b_count;
8157                 offset += b_count;
8158                 eeprom->len += b_count;
8159         }
8160
8161         /* read bytes up to the last 4 byte boundary */
8162         pd = &data[eeprom->len];
8163         for (i = 0; i < (len - (len & 3)); i += 4) {
8164                 ret = tg3_nvram_read(tp, offset + i, &val);
8165                 if (ret) {
8166                         eeprom->len += i;
8167                         return ret;
8168                 }
8169                 val = cpu_to_le32(val);
8170                 memcpy(pd + i, &val, 4);
8171         }
8172         eeprom->len += i;
8173
8174         if (len & 3) {
8175                 /* read last bytes not ending on 4 byte boundary */
8176                 pd = &data[eeprom->len];
8177                 b_count = len & 3;
8178                 b_offset = offset + len - b_count;
8179                 ret = tg3_nvram_read(tp, b_offset, &val);
8180                 if (ret)
8181                         return ret;
8182                 val = cpu_to_le32(val);
8183                 memcpy(pd, ((char*)&val), b_count);
8184                 eeprom->len += b_count;
8185         }
8186         return 0;
8187 }
8188
8189 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8190
8191 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8192 {
8193         struct tg3 *tp = netdev_priv(dev);
8194         int ret;
8195         u32 offset, len, b_offset, odd_len, start, end;
8196         u8 *buf;
8197
8198         if (tp->link_config.phy_is_low_power)
8199                 return -EAGAIN;
8200
8201         if (eeprom->magic != TG3_EEPROM_MAGIC)
8202                 return -EINVAL;
8203
8204         offset = eeprom->offset;
8205         len = eeprom->len;
8206
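             /* NVRAM writes must also be whole, aligned 32-bit words: read back
              * the words straddling the start and end of the request so bytes
              * outside the caller's buffer are preserved.
              */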
8207         if ((b_offset = (offset & 3))) {
8208                 /* adjustments to start on required 4 byte boundary */
8209                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
8210                 if (ret)
8211                         return ret;
8212                 start = cpu_to_le32(start);
8213                 len += b_offset;
8214                 offset &= ~3;
8215                 if (len < 4)
8216                         len = 4;
8217         }
8218
8219         odd_len = 0;
8220         if (len & 3) {
8221                 /* adjustments to end on required 4 byte boundary */
8222                 odd_len = 1;
8223                 len = (len + 3) & ~3;
8224                 ret = tg3_nvram_read(tp, offset+len-4, &end);
8225                 if (ret)
8226                         return ret;
8227                 end = cpu_to_le32(end);
8228         }
8229
8230         buf = data;
8231         if (b_offset || odd_len) {
8232                 buf = kmalloc(len, GFP_KERNEL);
8233                 if (!buf)
8234                         return -ENOMEM;
8235                 if (b_offset)
8236                         memcpy(buf, &start, 4);
8237                 if (odd_len)
8238                         memcpy(buf+len-4, &end, 4);
8239                 memcpy(buf + b_offset, data, eeprom->len);
8240         }
8241
8242         ret = tg3_nvram_write_block(tp, offset, len, buf);
8243
8244         if (buf != data)
8245                 kfree(buf);
8246
8247         return ret;
8248 }
8249
8250 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8251 {
8252         struct tg3 *tp = netdev_priv(dev);
8253
8254         cmd->supported = (SUPPORTED_Autoneg);
8255
8256         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8257                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8258                                    SUPPORTED_1000baseT_Full);
8259
8260         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8261                 cmd->supported |= (SUPPORTED_100baseT_Half |
8262                                   SUPPORTED_100baseT_Full |
8263                                   SUPPORTED_10baseT_Half |
8264                                   SUPPORTED_10baseT_Full |
8265                                   SUPPORTED_MII);
8266                 cmd->port = PORT_TP;
8267         } else {
8268                 cmd->supported |= SUPPORTED_FIBRE;
8269                 cmd->port = PORT_FIBRE;
8270         }
8271
8272         cmd->advertising = tp->link_config.advertising;
8273         if (netif_running(dev)) {
8274                 cmd->speed = tp->link_config.active_speed;
8275                 cmd->duplex = tp->link_config.active_duplex;
8276         }
8277         cmd->phy_address = PHY_ADDR;
8278         cmd->transceiver = 0;
8279         cmd->autoneg = tp->link_config.autoneg;
8280         cmd->maxtxpkt = 0;
8281         cmd->maxrxpkt = 0;
8282         return 0;
8283 }
8284
8285 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8286 {
8287         struct tg3 *tp = netdev_priv(dev);
8288
8289         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8290                 /* These are the only advertisement bits that may be set.  */
8291                 if (cmd->autoneg == AUTONEG_ENABLE &&
8292                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8293                                           ADVERTISED_1000baseT_Full |
8294                                           ADVERTISED_Autoneg |
8295                                           ADVERTISED_FIBRE)))
8296                         return -EINVAL;
8297                 /* Fiber can only do SPEED_1000.  */
8298                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8299                          (cmd->speed != SPEED_1000))
8300                         return -EINVAL;
8301         /* Copper cannot force SPEED_1000.  */
8302         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8303                    (cmd->speed == SPEED_1000))
8304                 return -EINVAL;
8305         else if ((cmd->speed == SPEED_1000) &&
8306                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8307                 return -EINVAL;
8308
8309         tg3_full_lock(tp, 0);
8310
8311         tp->link_config.autoneg = cmd->autoneg;
8312         if (cmd->autoneg == AUTONEG_ENABLE) {
8313                 tp->link_config.advertising = (cmd->advertising |
8314                                               ADVERTISED_Autoneg);
8315                 tp->link_config.speed = SPEED_INVALID;
8316                 tp->link_config.duplex = DUPLEX_INVALID;
8317         } else {
8318                 tp->link_config.advertising = 0;
8319                 tp->link_config.speed = cmd->speed;
8320                 tp->link_config.duplex = cmd->duplex;
8321         }
8322
8323         tp->link_config.orig_speed = tp->link_config.speed;
8324         tp->link_config.orig_duplex = tp->link_config.duplex;
8325         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8326
8327         if (netif_running(dev))
8328                 tg3_setup_phy(tp, 1);
8329
8330         tg3_full_unlock(tp);
8331
8332         return 0;
8333 }
8334
8335 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8336 {
8337         struct tg3 *tp = netdev_priv(dev);
8338
8339         strcpy(info->driver, DRV_MODULE_NAME);
8340         strcpy(info->version, DRV_MODULE_VERSION);
8341         strcpy(info->fw_version, tp->fw_ver);
8342         strcpy(info->bus_info, pci_name(tp->pdev));
8343 }
8344
8345 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8346 {
8347         struct tg3 *tp = netdev_priv(dev);
8348
8349         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8350                 wol->supported = WAKE_MAGIC;
8351         else
8352                 wol->supported = 0;
8353         wol->wolopts = 0;
8354         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8355                 wol->wolopts = WAKE_MAGIC;
8356         memset(&wol->sopass, 0, sizeof(wol->sopass));
8357 }
8358
8359 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8360 {
8361         struct tg3 *tp = netdev_priv(dev);
8362
8363         if (wol->wolopts & ~WAKE_MAGIC)
8364                 return -EINVAL;
8365         if ((wol->wolopts & WAKE_MAGIC) &&
8366             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8367                 return -EINVAL;
8368
8369         spin_lock_bh(&tp->lock);
8370         if (wol->wolopts & WAKE_MAGIC)
8371                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8372         else
8373                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8374         spin_unlock_bh(&tp->lock);
8375
8376         return 0;
8377 }
8378
8379 static u32 tg3_get_msglevel(struct net_device *dev)
8380 {
8381         struct tg3 *tp = netdev_priv(dev);
8382         return tp->msg_enable;
8383 }
8384
8385 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8386 {
8387         struct tg3 *tp = netdev_priv(dev);
8388         tp->msg_enable = value;
8389 }
8390
8391 static int tg3_set_tso(struct net_device *dev, u32 value)
8392 {
8393         struct tg3 *tp = netdev_priv(dev);
8394
8395         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8396                 if (value)
8397                         return -EINVAL;
8398                 return 0;
8399         }
8400         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8401             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8402                 if (value) {
8403                         dev->features |= NETIF_F_TSO6;
8404                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8405                                 dev->features |= NETIF_F_TSO_ECN;
8406                 } else
8407                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8408         }
8409         return ethtool_op_set_tso(dev, value);
8410 }
8411
8412 static int tg3_nway_reset(struct net_device *dev)
8413 {
8414         struct tg3 *tp = netdev_priv(dev);
8415         u32 bmcr;
8416         int r;
8417
8418         if (!netif_running(dev))
8419                 return -EAGAIN;
8420
8421         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8422                 return -EINVAL;
8423
8424         spin_lock_bh(&tp->lock);
8425         r = -EINVAL;
8426         tg3_readphy(tp, MII_BMCR, &bmcr);
8427         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8428             ((bmcr & BMCR_ANENABLE) ||
8429              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8430                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8431                                            BMCR_ANENABLE);
8432                 r = 0;
8433         }
8434         spin_unlock_bh(&tp->lock);
8435
8436         return r;
8437 }
8438
8439 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8440 {
8441         struct tg3 *tp = netdev_priv(dev);
8442
8443         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8444         ering->rx_mini_max_pending = 0;
8445         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8446                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8447         else
8448                 ering->rx_jumbo_max_pending = 0;
8449
8450         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8451
8452         ering->rx_pending = tp->rx_pending;
8453         ering->rx_mini_pending = 0;
8454         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8455                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8456         else
8457                 ering->rx_jumbo_pending = 0;
8458
8459         ering->tx_pending = tp->tx_pending;
8460 }
8461
8462 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8463 {
8464         struct tg3 *tp = netdev_priv(dev);
8465         int irq_sync = 0, err = 0;
8466
8467         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8468             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8469             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8470             (ering->tx_pending <= MAX_SKB_FRAGS) ||
8471             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8472              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8473                 return -EINVAL;
8474
8475         if (netif_running(dev)) {
8476                 tg3_netif_stop(tp);
8477                 irq_sync = 1;
8478         }
8479
8480         tg3_full_lock(tp, irq_sync);
8481
8482         tp->rx_pending = ering->rx_pending;
8483
8484         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8485             tp->rx_pending > 63)
8486                 tp->rx_pending = 63;
8487         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8488         tp->tx_pending = ering->tx_pending;
8489
8490         if (netif_running(dev)) {
8491                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8492                 err = tg3_restart_hw(tp, 1);
8493                 if (!err)
8494                         tg3_netif_start(tp);
8495         }
8496
8497         tg3_full_unlock(tp);
8498
8499         return err;
8500 }
8501
8502 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8503 {
8504         struct tg3 *tp = netdev_priv(dev);
8505
8506         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8507         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8508         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8509 }
8510
8511 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8512 {
8513         struct tg3 *tp = netdev_priv(dev);
8514         int irq_sync = 0, err = 0;
8515
8516         if (netif_running(dev)) {
8517                 tg3_netif_stop(tp);
8518                 irq_sync = 1;
8519         }
8520
8521         tg3_full_lock(tp, irq_sync);
8522
8523         if (epause->autoneg)
8524                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8525         else
8526                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8527         if (epause->rx_pause)
8528                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8529         else
8530                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8531         if (epause->tx_pause)
8532                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8533         else
8534                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8535
8536         if (netif_running(dev)) {
8537                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8538                 err = tg3_restart_hw(tp, 1);
8539                 if (!err)
8540                         tg3_netif_start(tp);
8541         }
8542
8543         tg3_full_unlock(tp);
8544
8545         return err;
8546 }
8547
8548 static u32 tg3_get_rx_csum(struct net_device *dev)
8549 {
8550         struct tg3 *tp = netdev_priv(dev);
8551         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8552 }
8553
8554 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8555 {
8556         struct tg3 *tp = netdev_priv(dev);
8557
8558         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8559                 if (data != 0)
8560                         return -EINVAL;
8561                 return 0;
8562         }
8563
8564         spin_lock_bh(&tp->lock);
8565         if (data)
8566                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8567         else
8568                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8569         spin_unlock_bh(&tp->lock);
8570
8571         return 0;
8572 }
8573
8574 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8575 {
8576         struct tg3 *tp = netdev_priv(dev);
8577
8578         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8579                 if (data != 0)
8580                         return -EINVAL;
8581                 return 0;
8582         }
8583
8584         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8585             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8586             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8587             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8588                 ethtool_op_set_tx_ipv6_csum(dev, data);
8589         else
8590                 ethtool_op_set_tx_csum(dev, data);
8591
8592         return 0;
8593 }
8594
8595 static int tg3_get_sset_count(struct net_device *dev, int sset)
8596 {
8597         switch (sset) {
8598         case ETH_SS_TEST:
8599                 return TG3_NUM_TEST;
8600         case ETH_SS_STATS:
8601                 return TG3_NUM_STATS;
8602         default:
8603                 return -EOPNOTSUPP;
8604         }
8605 }
8606
8607 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8608 {
8609         switch (stringset) {
8610         case ETH_SS_STATS:
8611                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8612                 break;
8613         case ETH_SS_TEST:
8614                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8615                 break;
8616         default:
8617                 WARN_ON(1);     /* we need a WARN() */
8618                 break;
8619         }
8620 }
8621
8622 static int tg3_phys_id(struct net_device *dev, u32 data)
8623 {
8624         struct tg3 *tp = netdev_priv(dev);
8625         int i;
8626
8627         if (!netif_running(tp->dev))
8628                 return -EAGAIN;
8629
8630         if (data == 0)
8631                 data = 2;
8632
8633         for (i = 0; i < (data * 2); i++) {
8634                 if ((i % 2) == 0)
8635                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8636                                            LED_CTRL_1000MBPS_ON |
8637                                            LED_CTRL_100MBPS_ON |
8638                                            LED_CTRL_10MBPS_ON |
8639                                            LED_CTRL_TRAFFIC_OVERRIDE |
8640                                            LED_CTRL_TRAFFIC_BLINK |
8641                                            LED_CTRL_TRAFFIC_LED);
8642
8643                 else
8644                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8645                                            LED_CTRL_TRAFFIC_OVERRIDE);
8646
8647                 if (msleep_interruptible(500))
8648                         break;
8649         }
8650         tw32(MAC_LED_CTRL, tp->led_ctrl);
8651         return 0;
8652 }
8653
8654 static void tg3_get_ethtool_stats(struct net_device *dev,
8655                                   struct ethtool_stats *estats, u64 *tmp_stats)
8656 {
8657         struct tg3 *tp = netdev_priv(dev);
8658         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8659 }
8660
8661 #define NVRAM_TEST_SIZE 0x100
8662 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8663 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8664 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8665
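     /* Read back the NVRAM image and verify it: legacy images get CRC checks
      * on the bootstrap and manufacturing blocks, selfboot format 1 images use
      * a simple byte checksum, and hardware selfboot images carry parity bits.
      */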
8666 static int tg3_test_nvram(struct tg3 *tp)
8667 {
8668         u32 *buf, csum, magic;
8669         int i, j, k, err = 0, size;
8670
8671         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8672                 return -EIO;
8673
8674         if (magic == TG3_EEPROM_MAGIC)
8675                 size = NVRAM_TEST_SIZE;
8676         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
8677                 if ((magic & 0xe00000) == 0x200000)
8678                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8679                 else
8680                         return 0;
8681         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
8682                 size = NVRAM_SELFBOOT_HW_SIZE;
8683         else
8684                 return -EIO;
8685
8686         buf = kmalloc(size, GFP_KERNEL);
8687         if (buf == NULL)
8688                 return -ENOMEM;
8689
8690         err = -EIO;
8691         for (i = 0, j = 0; i < size; i += 4, j++) {
8692                 u32 val;
8693
8694                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8695                         break;
8696                 buf[j] = cpu_to_le32(val);
8697         }
8698         if (i < size)
8699                 goto out;
8700
8701         /* Selfboot format */
8702         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
8703             TG3_EEPROM_MAGIC_FW) {
8704                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8705
8706                 for (i = 0; i < size; i++)
8707                         csum8 += buf8[i];
8708
8709                 if (csum8 == 0) {
8710                         err = 0;
8711                         goto out;
8712                 }
8713
8714                 err = -EIO;
8715                 goto out;
8716         }
8717
8718         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
8719             TG3_EEPROM_MAGIC_HW) {
8720                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
8721                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
8722                 u8 *buf8 = (u8 *) buf;
8723
8724                 /* Separate the parity bits and the data bytes.  */
8725                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
8726                         if ((i == 0) || (i == 8)) {
8727                                 int l;
8728                                 u8 msk;
8729
8730                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
8731                                         parity[k++] = buf8[i] & msk;
8732                                 i++;
8733                         }
8734                         else if (i == 16) {
8735                                 int l;
8736                                 u8 msk;
8737
8738                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
8739                                         parity[k++] = buf8[i] & msk;
8740                                 i++;
8741
8742                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
8743                                         parity[k++] = buf8[i] & msk;
8744                                 i++;
8745                         }
8746                         data[j++] = buf8[i];
8747                 }
8748
8749                 err = -EIO;
8750                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
8751                         u8 hw8 = hweight8(data[i]);
8752
8753                         if ((hw8 & 0x1) && parity[i])
8754                                 goto out;
8755                         else if (!(hw8 & 0x1) && !parity[i])
8756                                 goto out;
8757                 }
8758                 err = 0;
8759                 goto out;
8760         }
8761
8762         /* Bootstrap checksum at offset 0x10 */
8763         csum = calc_crc((unsigned char *) buf, 0x10);
8764         if (csum != cpu_to_le32(buf[0x10/4]))
8765                 goto out;
8766
8767         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8768         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8769         if (csum != cpu_to_le32(buf[0xfc/4]))
8770                 goto out;
8771
8772         err = 0;
8773
8774 out:
8775         kfree(buf);
8776         return err;
8777 }
8778
8779 #define TG3_SERDES_TIMEOUT_SEC  2
8780 #define TG3_COPPER_TIMEOUT_SEC  6
8781
8782 static int tg3_test_link(struct tg3 *tp)
8783 {
8784         int i, max;
8785
8786         if (!netif_running(tp->dev))
8787                 return -ENODEV;
8788
8789         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8790                 max = TG3_SERDES_TIMEOUT_SEC;
8791         else
8792                 max = TG3_COPPER_TIMEOUT_SEC;
8793
8794         for (i = 0; i < max; i++) {
8795                 if (netif_carrier_ok(tp->dev))
8796                         return 0;
8797
8798                 if (msleep_interruptible(1000))
8799                         break;
8800         }
8801
8802         return -EIO;
8803 }
8804
8805 /* Only test the commonly used registers */
8806 static int tg3_test_registers(struct tg3 *tp)
8807 {
8808         int i, is_5705, is_5750;
8809         u32 offset, read_mask, write_mask, val, save_val, read_val;
8810         static struct {
8811                 u16 offset;
8812                 u16 flags;
8813 #define TG3_FL_5705     0x1
8814 #define TG3_FL_NOT_5705 0x2
8815 #define TG3_FL_NOT_5788 0x4
8816 #define TG3_FL_NOT_5750 0x8
8817                 u32 read_mask;
8818                 u32 write_mask;
8819         } reg_tbl[] = {
8820                 /* MAC Control Registers */
8821                 { MAC_MODE, TG3_FL_NOT_5705,
8822                         0x00000000, 0x00ef6f8c },
8823                 { MAC_MODE, TG3_FL_5705,
8824                         0x00000000, 0x01ef6b8c },
8825                 { MAC_STATUS, TG3_FL_NOT_5705,
8826                         0x03800107, 0x00000000 },
8827                 { MAC_STATUS, TG3_FL_5705,
8828                         0x03800100, 0x00000000 },
8829                 { MAC_ADDR_0_HIGH, 0x0000,
8830                         0x00000000, 0x0000ffff },
8831                 { MAC_ADDR_0_LOW, 0x0000,
8832                         0x00000000, 0xffffffff },
8833                 { MAC_RX_MTU_SIZE, 0x0000,
8834                         0x00000000, 0x0000ffff },
8835                 { MAC_TX_MODE, 0x0000,
8836                         0x00000000, 0x00000070 },
8837                 { MAC_TX_LENGTHS, 0x0000,
8838                         0x00000000, 0x00003fff },
8839                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8840                         0x00000000, 0x000007fc },
8841                 { MAC_RX_MODE, TG3_FL_5705,
8842                         0x00000000, 0x000007dc },
8843                 { MAC_HASH_REG_0, 0x0000,
8844                         0x00000000, 0xffffffff },
8845                 { MAC_HASH_REG_1, 0x0000,
8846                         0x00000000, 0xffffffff },
8847                 { MAC_HASH_REG_2, 0x0000,
8848                         0x00000000, 0xffffffff },
8849                 { MAC_HASH_REG_3, 0x0000,
8850                         0x00000000, 0xffffffff },
8851
8852                 /* Receive Data and Receive BD Initiator Control Registers. */
8853                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8854                         0x00000000, 0xffffffff },
8855                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8856                         0x00000000, 0xffffffff },
8857                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8858                         0x00000000, 0x00000003 },
8859                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8860                         0x00000000, 0xffffffff },
8861                 { RCVDBDI_STD_BD+0, 0x0000,
8862                         0x00000000, 0xffffffff },
8863                 { RCVDBDI_STD_BD+4, 0x0000,
8864                         0x00000000, 0xffffffff },
8865                 { RCVDBDI_STD_BD+8, 0x0000,
8866                         0x00000000, 0xffff0002 },
8867                 { RCVDBDI_STD_BD+0xc, 0x0000,
8868                         0x00000000, 0xffffffff },
8869
8870                 /* Receive BD Initiator Control Registers. */
8871                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8872                         0x00000000, 0xffffffff },
8873                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8874                         0x00000000, 0x000003ff },
8875                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8876                         0x00000000, 0xffffffff },
8877
8878                 /* Host Coalescing Control Registers. */
8879                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8880                         0x00000000, 0x00000004 },
8881                 { HOSTCC_MODE, TG3_FL_5705,
8882                         0x00000000, 0x000000f6 },
8883                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8884                         0x00000000, 0xffffffff },
8885                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8886                         0x00000000, 0x000003ff },
8887                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8888                         0x00000000, 0xffffffff },
8889                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8890                         0x00000000, 0x000003ff },
8891                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8892                         0x00000000, 0xffffffff },
8893                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8894                         0x00000000, 0x000000ff },
8895                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8896                         0x00000000, 0xffffffff },
8897                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8898                         0x00000000, 0x000000ff },
8899                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8900                         0x00000000, 0xffffffff },
8901                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8902                         0x00000000, 0xffffffff },
8903                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8904                         0x00000000, 0xffffffff },
8905                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8906                         0x00000000, 0x000000ff },
8907                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8908                         0x00000000, 0xffffffff },
8909                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8910                         0x00000000, 0x000000ff },
8911                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8912                         0x00000000, 0xffffffff },
8913                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8914                         0x00000000, 0xffffffff },
8915                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8916                         0x00000000, 0xffffffff },
8917                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8918                         0x00000000, 0xffffffff },
8919                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8920                         0x00000000, 0xffffffff },
8921                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8922                         0xffffffff, 0x00000000 },
8923                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8924                         0xffffffff, 0x00000000 },
8925
8926                 /* Buffer Manager Control Registers. */
8927                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
8928                         0x00000000, 0x007fff80 },
8929                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
8930                         0x00000000, 0x007fffff },
8931                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8932                         0x00000000, 0x0000003f },
8933                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8934                         0x00000000, 0x000001ff },
8935                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8936                         0x00000000, 0x000001ff },
8937                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8938                         0xffffffff, 0x00000000 },
8939                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8940                         0xffffffff, 0x00000000 },
8941
8942                 /* Mailbox Registers */
8943                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8944                         0x00000000, 0x000001ff },
8945                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8946                         0x00000000, 0x000001ff },
8947                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8948                         0x00000000, 0x000007ff },
8949                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8950                         0x00000000, 0x000001ff },
8951
8952                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8953         };
8954
8955         is_5705 = is_5750 = 0;
8956         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8957                 is_5705 = 1;
8958                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8959                         is_5750 = 1;
8960         }
8961
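             /* For each register, write all zeros and then ones to every bit
              * covered by either mask.  After each write the read-only bits
              * (read_mask) must keep their saved value and the writable bits
              * (write_mask) must read back exactly what was written.  The
              * original contents are restored afterwards.
              */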
8962         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8963                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8964                         continue;
8965
8966                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8967                         continue;
8968
8969                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8970                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8971                         continue;
8972
8973                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8974                         continue;
8975
8976                 offset = (u32) reg_tbl[i].offset;
8977                 read_mask = reg_tbl[i].read_mask;
8978                 write_mask = reg_tbl[i].write_mask;
8979
8980                 /* Save the original register content */
8981                 save_val = tr32(offset);
8982
8983                 /* Determine the read-only value. */
8984                 read_val = save_val & read_mask;
8985
8986                 /* Write zero to the register, then make sure the read-only bits
8987                  * are not changed and the read/write bits are all zeros.
8988                  */
8989                 tw32(offset, 0);
8990
8991                 val = tr32(offset);
8992
8993                 /* Test the read-only and read/write bits. */
8994                 if (((val & read_mask) != read_val) || (val & write_mask))
8995                         goto out;
8996
8997                 /* Write ones to all the bits defined by RdMask and WrMask, then
8998                  * make sure the read-only bits are not changed and the
8999                  * read/write bits are all ones.
9000                  */
9001                 tw32(offset, read_mask | write_mask);
9002
9003                 val = tr32(offset);
9004
9005                 /* Test the read-only bits. */
9006                 if ((val & read_mask) != read_val)
9007                         goto out;
9008
9009                 /* Test the read/write bits. */
9010                 if ((val & write_mask) != write_mask)
9011                         goto out;
9012
9013                 tw32(offset, save_val);
9014         }
9015
9016         return 0;
9017
9018 out:
9019         if (netif_msg_hw(tp))
9020                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9021                        offset);
9022         tw32(offset, save_val);
9023         return -EIO;
9024 }
9025
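     /* Write each test pattern (all zeros, all ones, 0xaa55a55a) across the
      * given SRAM range one word at a time and read it back for comparison.
      */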
9026 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9027 {
9028         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9029         int i;
9030         u32 j;
9031
9032         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9033                 for (j = 0; j < len; j += 4) {
9034                         u32 val;
9035
9036                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9037                         tg3_read_mem(tp, offset + j, &val);
9038                         if (val != test_pattern[i])
9039                                 return -EIO;
9040                 }
9041         }
9042         return 0;
9043 }
9044
9045 static int tg3_test_memory(struct tg3 *tp)
9046 {
9047         static struct mem_entry {
9048                 u32 offset;
9049                 u32 len;
9050         } mem_tbl_570x[] = {
9051                 { 0x00000000, 0x00b50},
9052                 { 0x00002000, 0x1c000},
9053                 { 0xffffffff, 0x00000}
9054         }, mem_tbl_5705[] = {
9055                 { 0x00000100, 0x0000c},
9056                 { 0x00000200, 0x00008},
9057                 { 0x00004000, 0x00800},
9058                 { 0x00006000, 0x01000},
9059                 { 0x00008000, 0x02000},
9060                 { 0x00010000, 0x0e000},
9061                 { 0xffffffff, 0x00000}
9062         }, mem_tbl_5755[] = {
9063                 { 0x00000200, 0x00008},
9064                 { 0x00004000, 0x00800},
9065                 { 0x00006000, 0x00800},
9066                 { 0x00008000, 0x02000},
9067                 { 0x00010000, 0x0c000},
9068                 { 0xffffffff, 0x00000}
9069         }, mem_tbl_5906[] = {
9070                 { 0x00000200, 0x00008},
9071                 { 0x00004000, 0x00400},
9072                 { 0x00006000, 0x00400},
9073                 { 0x00008000, 0x01000},
9074                 { 0x00010000, 0x01000},
9075                 { 0xffffffff, 0x00000}
9076         };
9077         struct mem_entry *mem_tbl;
9078         int err = 0;
9079         int i;
9080
9081         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9082                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9083                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9084                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9085                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9086                         mem_tbl = mem_tbl_5755;
9087                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9088                         mem_tbl = mem_tbl_5906;
9089                 else
9090                         mem_tbl = mem_tbl_5705;
9091         } else
9092                 mem_tbl = mem_tbl_570x;
9093
9094         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9095                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9096                     mem_tbl[i].len)) != 0)
9097                         break;
9098         }
9099
9100         return err;
9101 }
9102
9103 #define TG3_MAC_LOOPBACK        0
9104 #define TG3_PHY_LOOPBACK        1
9105
9106 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9107 {
9108         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9109         u32 desc_idx;
9110         struct sk_buff *skb, *rx_skb;
9111         u8 *tx_data;
9112         dma_addr_t map;
9113         int num_pkts, tx_len, rx_len, i, err;
9114         struct tg3_rx_buffer_desc *desc;
9115
9116         if (loopback_mode == TG3_MAC_LOOPBACK) {
9117                 /* HW errata - mac loopback fails in some cases on 5780.
9118                  * Normal traffic and PHY loopback are not affected by
9119                  * errata.
9120                  */
9121                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9122                         return 0;
9123
9124                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9125                            MAC_MODE_PORT_INT_LPBACK;
9126                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9127                         mac_mode |= MAC_MODE_LINK_POLARITY;
9128                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9129                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9130                 else
9131                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9132                 tw32(MAC_MODE, mac_mode);
9133         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9134                 u32 val;
9135
9136                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9137                         u32 phytest;
9138
9139                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9140                                 u32 phy;
9141
9142                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9143                                              phytest | MII_TG3_EPHY_SHADOW_EN);
9144                                 if (!tg3_readphy(tp, 0x1b, &phy))
9145                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
9146                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9147                         }
9148                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9149                 } else
9150                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9151
9152                 tg3_phy_toggle_automdix(tp, 0);
9153
9154                 tg3_writephy(tp, MII_BMCR, val);
9155                 udelay(40);
9156
9157                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9158                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9159                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9160                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9161                 } else
9162                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9163
9164                 /* Reset to prevent intermittently losing the first rx packet. */
9165                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9166                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9167                         udelay(10);
9168                         tw32_f(MAC_RX_MODE, tp->rx_mode);
9169                 }
9170                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9171                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9172                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9173                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9174                                 mac_mode |= MAC_MODE_LINK_POLARITY;
9175                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
9176                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9177                 }
9178                 tw32(MAC_MODE, mac_mode);
9179         }
9180         else
9181                 return -EINVAL;
9182
9183         err = -EIO;
9184
9185         tx_len = 1514;
9186         skb = netdev_alloc_skb(tp->dev, tx_len);
9187         if (!skb)
9188                 return -ENOMEM;
9189
9190         tx_data = skb_put(skb, tx_len);
9191         memcpy(tx_data, tp->dev->dev_addr, 6);
9192         memset(tx_data + 6, 0x0, 8);
9193
9194         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9195
9196         for (i = 14; i < tx_len; i++)
9197                 tx_data[i] = (u8) (i & 0xff);
9198
9199         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9200
9201         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9202              HOSTCC_MODE_NOW);
9203
9204         udelay(10);
9205
9206         rx_start_idx = tp->hw_status->idx[0].rx_producer;
9207
9208         num_pkts = 0;
9209
9210         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9211
9212         tp->tx_prod++;
9213         num_pkts++;
9214
9215         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9216                      tp->tx_prod);
9217         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9218
9219         udelay(10);
9220
9221         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
9222         for (i = 0; i < 25; i++) {
9223                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9224                        HOSTCC_MODE_NOW);
9225
9226                 udelay(10);
9227
9228                 tx_idx = tp->hw_status->idx[0].tx_consumer;
9229                 rx_idx = tp->hw_status->idx[0].rx_producer;
9230                 if ((tx_idx == tp->tx_prod) &&
9231                     (rx_idx == (rx_start_idx + num_pkts)))
9232                         break;
9233         }
9234
9235         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9236         dev_kfree_skb(skb);
9237
9238         if (tx_idx != tp->tx_prod)
9239                 goto out;
9240
9241         if (rx_idx != rx_start_idx + num_pkts)
9242                 goto out;
9243
9244         desc = &tp->rx_rcb[rx_start_idx];
9245         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9246         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9247         if (opaque_key != RXD_OPAQUE_RING_STD)
9248                 goto out;
9249
9250         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9251             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9252                 goto out;
9253
9254         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9255         if (rx_len != tx_len)
9256                 goto out;
9257
9258         rx_skb = tp->rx_std_buffers[desc_idx].skb;
9259
9260         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9261         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9262
9263         for (i = 14; i < tx_len; i++) {
9264                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9265                         goto out;
9266         }
9267         err = 0;
9268
9269         /* tg3_free_rings will unmap and free the rx_skb */
9270 out:
9271         return err;
9272 }
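/*
 * Illustrative sketch (not part of the driver): layout of the loopback test
 * frame built above -- destination MAC set to the device's own address, 8
 * zero bytes covering the source MAC and type field, then an incrementing
 * byte pattern that the received frame is compared against byte for byte.
 */
static void tg3_sketch_loopback_frame(struct tg3 *tp, u8 *buf, int len)
{
        int i;

        memcpy(buf, tp->dev->dev_addr, 6);      /* dst MAC: ourselves */
        memset(buf + 6, 0x0, 8);                /* src MAC + type: zeros */
        for (i = 14; i < len; i++)              /* payload: 0x0e, 0x0f, ... */
                buf[i] = (u8) (i & 0xff);
}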
9273
9274 #define TG3_MAC_LOOPBACK_FAILED         1
9275 #define TG3_PHY_LOOPBACK_FAILED         2
9276 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9277                                          TG3_PHY_LOOPBACK_FAILED)
9278
9279 static int tg3_test_loopback(struct tg3 *tp)
9280 {
9281         int err = 0;
9282         u32 cpmuctrl = 0;
9283
9284         if (!netif_running(tp->dev))
9285                 return TG3_LOOPBACK_FAILED;
9286
9287         err = tg3_reset_hw(tp, 1);
9288         if (err)
9289                 return TG3_LOOPBACK_FAILED;
9290
9291         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9292                 int i;
9293                 u32 status;
9294
9295                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9296
9297                 /* Wait for up to 40 microseconds to acquire lock. */
9298                 for (i = 0; i < 4; i++) {
9299                         status = tr32(TG3_CPMU_MUTEX_GNT);
9300                         if (status == CPMU_MUTEX_GNT_DRIVER)
9301                                 break;
9302                         udelay(10);
9303                 }
9304
9305                 if (status != CPMU_MUTEX_GNT_DRIVER)
9306                         return TG3_LOOPBACK_FAILED;
9307
9308                 cpmuctrl = tr32(TG3_CPMU_CTRL);
9309
9310                 /* Turn off power management based on link speed. */
9311                 tw32(TG3_CPMU_CTRL,
9312                      cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE);
9313         }
9314
9315         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9316                 err |= TG3_MAC_LOOPBACK_FAILED;
9317
9318         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9319                 tw32(TG3_CPMU_CTRL, cpmuctrl);
9320
9321                 /* Release the mutex */
9322                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9323         }
9324
9325         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9326                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9327                         err |= TG3_PHY_LOOPBACK_FAILED;
9328         }
9329
9330         return err;
9331 }
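/*
 * Illustrative sketch (not part of the driver): the CPMU hardware-mutex
 * handshake used above, factored into an acquire/release pair.  The 4 x
 * 10 usec poll mirrors the "up to 40 microseconds" budget in
 * tg3_test_loopback().
 */
static int tg3_sketch_cpmu_lock(struct tg3 *tp)
{
        int i;

        tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
        for (i = 0; i < 4; i++) {
                if (tr32(TG3_CPMU_MUTEX_GNT) == CPMU_MUTEX_GNT_DRIVER)
                        return 0;
                udelay(10);
        }
        return -EBUSY;                          /* grant never arrived */
}

static void tg3_sketch_cpmu_unlock(struct tg3 *tp)
{
        /* Writing the grant bit back releases the mutex. */
        tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
}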
9332
9333 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9334                           u64 *data)
9335 {
9336         struct tg3 *tp = netdev_priv(dev);
9337
9338         if (tp->link_config.phy_is_low_power)
9339                 tg3_set_power_state(tp, PCI_D0);
9340
9341         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9342
9343         if (tg3_test_nvram(tp) != 0) {
9344                 etest->flags |= ETH_TEST_FL_FAILED;
9345                 data[0] = 1;
9346         }
9347         if (tg3_test_link(tp) != 0) {
9348                 etest->flags |= ETH_TEST_FL_FAILED;
9349                 data[1] = 1;
9350         }
9351         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9352                 int err, irq_sync = 0;
9353
9354                 if (netif_running(dev)) {
9355                         tg3_netif_stop(tp);
9356                         irq_sync = 1;
9357                 }
9358
9359                 tg3_full_lock(tp, irq_sync);
9360
9361                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
9362                 err = tg3_nvram_lock(tp);
9363                 tg3_halt_cpu(tp, RX_CPU_BASE);
9364                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9365                         tg3_halt_cpu(tp, TX_CPU_BASE);
9366                 if (!err)
9367                         tg3_nvram_unlock(tp);
9368
9369                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9370                         tg3_phy_reset(tp);
9371
9372                 if (tg3_test_registers(tp) != 0) {
9373                         etest->flags |= ETH_TEST_FL_FAILED;
9374                         data[2] = 1;
9375                 }
9376                 if (tg3_test_memory(tp) != 0) {
9377                         etest->flags |= ETH_TEST_FL_FAILED;
9378                         data[3] = 1;
9379                 }
9380                 if ((data[4] = tg3_test_loopback(tp)) != 0)
9381                         etest->flags |= ETH_TEST_FL_FAILED;
9382
9383                 tg3_full_unlock(tp);
9384
9385                 if (tg3_test_interrupt(tp) != 0) {
9386                         etest->flags |= ETH_TEST_FL_FAILED;
9387                         data[5] = 1;
9388                 }
9389
9390                 tg3_full_lock(tp, 0);
9391
9392                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9393                 if (netif_running(dev)) {
9394                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9395                         if (!tg3_restart_hw(tp, 1))
9396                                 tg3_netif_start(tp);
9397                 }
9398
9399                 tg3_full_unlock(tp);
9400         }
9401         if (tp->link_config.phy_is_low_power)
9402                 tg3_set_power_state(tp, PCI_D3hot);
9403
9404 }
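/*
 * Userspace usage sketch for the ethtool self test implemented above (a
 * separate example program, not part of this file).  The interface name
 * "eth0" and the six-entry result buffer are assumptions; the six slots
 * correspond to data[0]..data[5] filled in tg3_self_test() (nvram, link,
 * registers, memory, loopback, interrupt).
 *
 *      #include <stdio.h>
 *      #include <stdlib.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/socket.h>
 *      #include <net/if.h>
 *      #include <linux/ethtool.h>
 *      #include <linux/sockios.h>
 *
 *      int main(void)
 *      {
 *              struct ethtool_test *test;
 *              struct ifreq ifr;
 *              int fd, i;
 *
 *              test = calloc(1, sizeof(*test) + 6 * sizeof(__u64));
 *              if (!test)
 *                      return 1;
 *              test->cmd = ETHTOOL_TEST;
 *              test->flags = ETH_TEST_FL_OFFLINE;      // run the full set
 *
 *              memset(&ifr, 0, sizeof(ifr));
 *              strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *              ifr.ifr_data = (char *)test;
 *
 *              fd = socket(AF_INET, SOCK_DGRAM, 0);
 *              if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
 *                      perror("ETHTOOL_TEST");
 *                      return 1;
 *              }
 *
 *              printf("self-test %s\n",
 *                     (test->flags & ETH_TEST_FL_FAILED) ?
 *                     "FAILED" : "passed");
 *              for (i = 0; i < 6; i++)
 *                      printf("  data[%d] = %llu\n", i,
 *                             (unsigned long long)test->data[i]);
 *              return 0;
 *      }
 */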
9405
9406 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9407 {
9408         struct mii_ioctl_data *data = if_mii(ifr);
9409         struct tg3 *tp = netdev_priv(dev);
9410         int err;
9411
9412         switch(cmd) {
9413         case SIOCGMIIPHY:
9414                 data->phy_id = PHY_ADDR;
9415
9416                 /* fallthru */
9417         case SIOCGMIIREG: {
9418                 u32 mii_regval;
9419
9420                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9421                         break;                  /* We have no PHY */
9422
9423                 if (tp->link_config.phy_is_low_power)
9424                         return -EAGAIN;
9425
9426                 spin_lock_bh(&tp->lock);
9427                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9428                 spin_unlock_bh(&tp->lock);
9429
9430                 data->val_out = mii_regval;
9431
9432                 return err;
9433         }
9434
9435         case SIOCSMIIREG:
9436                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9437                         break;                  /* We have no PHY */
9438
9439                 if (!capable(CAP_NET_ADMIN))
9440                         return -EPERM;
9441
9442                 if (tp->link_config.phy_is_low_power)
9443                         return -EAGAIN;
9444
9445                 spin_lock_bh(&tp->lock);
9446                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9447                 spin_unlock_bh(&tp->lock);
9448
9449                 return err;
9450
9451         default:
9452                 /* do nothing */
9453                 break;
9454         }
9455         return -EOPNOTSUPP;
9456 }
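/*
 * Userspace usage sketch for the SIOCGMIIPHY/SIOCGMIIREG path handled above
 * (a separate example program, not part of this file).  "eth0" and the
 * choice of MII_BMSR are arbitrary; on SERDES boards the driver falls
 * through to -EOPNOTSUPP as noted in the switch above.
 *
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/socket.h>
 *      #include <net/if.h>
 *      #include <linux/mii.h>
 *      #include <linux/sockios.h>
 *
 *      int main(void)
 *      {
 *              struct ifreq ifr;
 *              struct mii_ioctl_data *mii =
 *                      (struct mii_ioctl_data *)&ifr.ifr_data;
 *              int fd;
 *
 *              memset(&ifr, 0, sizeof(ifr));
 *              strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *
 *              fd = socket(AF_INET, SOCK_DGRAM, 0);
 *              if (fd < 0 || ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {
 *                      perror("SIOCGMIIPHY");          // fills mii->phy_id
 *                      return 1;
 *              }
 *
 *              mii->reg_num = MII_BMSR;                // status register
 *              if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {
 *                      perror("SIOCGMIIREG");
 *                      return 1;
 *              }
 *
 *              printf("PHY %d BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
 *              return 0;
 *      }
 */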
9457
9458 #if TG3_VLAN_TAG_USED
9459 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9460 {
9461         struct tg3 *tp = netdev_priv(dev);
9462
9463         if (netif_running(dev))
9464                 tg3_netif_stop(tp);
9465
9466         tg3_full_lock(tp, 0);
9467
9468         tp->vlgrp = grp;
9469
9470         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9471         __tg3_set_rx_mode(dev);
9472
9473         if (netif_running(dev))
9474                 tg3_netif_start(tp);
9475
9476         tg3_full_unlock(tp);
9477 }
9478 #endif
9479
9480 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9481 {
9482         struct tg3 *tp = netdev_priv(dev);
9483
9484         memcpy(ec, &tp->coal, sizeof(*ec));
9485         return 0;
9486 }
9487
9488 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9489 {
9490         struct tg3 *tp = netdev_priv(dev);
9491         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9492         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9493
9494         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9495                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9496                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9497                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9498                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9499         }
9500
9501         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9502             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9503             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9504             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9505             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9506             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9507             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9508             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9509             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9510             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9511                 return -EINVAL;
9512
9513         /* No rx interrupts will be generated if both are zero */
9514         if ((ec->rx_coalesce_usecs == 0) &&
9515             (ec->rx_max_coalesced_frames == 0))
9516                 return -EINVAL;
9517
9518         /* No tx interrupts will be generated if both are zero */
9519         if ((ec->tx_coalesce_usecs == 0) &&
9520             (ec->tx_max_coalesced_frames == 0))
9521                 return -EINVAL;
9522
9523         /* Only copy relevant parameters, ignore all others. */
9524         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9525         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9526         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9527         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9528         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9529         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9530         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9531         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9532         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9533
9534         if (netif_running(dev)) {
9535                 tg3_full_lock(tp, 0);
9536                 __tg3_set_coalesce(tp, &tp->coal);
9537                 tg3_full_unlock(tp);
9538         }
9539         return 0;
9540 }
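/*
 * Userspace usage sketch for the coalescing parameters validated above (a
 * separate example program, not part of this file).  The current settings
 * are fetched first so the fields left untouched stay inside the ranges
 * checked in tg3_set_coalesce(); "eth0" and the 50 usec / 8 frame values
 * are arbitrary examples, and at least one of the usecs/frames pair must
 * stay non-zero per direction or the driver returns -EINVAL.
 *
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/socket.h>
 *      #include <net/if.h>
 *      #include <linux/ethtool.h>
 *      #include <linux/sockios.h>
 *
 *      int main(void)
 *      {
 *              struct ethtool_coalesce ec;
 *              struct ifreq ifr;
 *              int fd;
 *
 *              memset(&ifr, 0, sizeof(ifr));
 *              strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *              ifr.ifr_data = (char *)&ec;
 *
 *              memset(&ec, 0, sizeof(ec));
 *              ec.cmd = ETHTOOL_GCOALESCE;
 *              fd = socket(AF_INET, SOCK_DGRAM, 0);
 *              if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
 *                      perror("ETHTOOL_GCOALESCE");
 *                      return 1;
 *              }
 *
 *              ec.rx_coalesce_usecs = 50;
 *              ec.rx_max_coalesced_frames = 8;
 *              ec.cmd = ETHTOOL_SCOALESCE;
 *              if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
 *                      perror("ETHTOOL_SCOALESCE");
 *                      return 1;
 *              }
 *              return 0;
 *      }
 */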
9541
9542 static const struct ethtool_ops tg3_ethtool_ops = {
9543         .get_settings           = tg3_get_settings,
9544         .set_settings           = tg3_set_settings,
9545         .get_drvinfo            = tg3_get_drvinfo,
9546         .get_regs_len           = tg3_get_regs_len,
9547         .get_regs               = tg3_get_regs,
9548         .get_wol                = tg3_get_wol,
9549         .set_wol                = tg3_set_wol,
9550         .get_msglevel           = tg3_get_msglevel,
9551         .set_msglevel           = tg3_set_msglevel,
9552         .nway_reset             = tg3_nway_reset,
9553         .get_link               = ethtool_op_get_link,
9554         .get_eeprom_len         = tg3_get_eeprom_len,
9555         .get_eeprom             = tg3_get_eeprom,
9556         .set_eeprom             = tg3_set_eeprom,
9557         .get_ringparam          = tg3_get_ringparam,
9558         .set_ringparam          = tg3_set_ringparam,
9559         .get_pauseparam         = tg3_get_pauseparam,
9560         .set_pauseparam         = tg3_set_pauseparam,
9561         .get_rx_csum            = tg3_get_rx_csum,
9562         .set_rx_csum            = tg3_set_rx_csum,
9563         .set_tx_csum            = tg3_set_tx_csum,
9564         .set_sg                 = ethtool_op_set_sg,
9565         .set_tso                = tg3_set_tso,
9566         .self_test              = tg3_self_test,
9567         .get_strings            = tg3_get_strings,
9568         .phys_id                = tg3_phys_id,
9569         .get_ethtool_stats      = tg3_get_ethtool_stats,
9570         .get_coalesce           = tg3_get_coalesce,
9571         .set_coalesce           = tg3_set_coalesce,
9572         .get_sset_count         = tg3_get_sset_count,
9573 };
9574
9575 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9576 {
9577         u32 cursize, val, magic;
9578
9579         tp->nvram_size = EEPROM_CHIP_SIZE;
9580
9581         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9582                 return;
9583
9584         if ((magic != TG3_EEPROM_MAGIC) &&
9585             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9586             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9587                 return;
9588
9589         /*
9590          * Size the chip by reading offsets at increasing powers of two.
9591          * When we encounter our validation signature, we know the addressing
9592          * has wrapped around, and thus have our chip size.
9593          */
9594         cursize = 0x10;
9595
9596         while (cursize < tp->nvram_size) {
9597                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9598                         return;
9599
9600                 if (val == magic)
9601                         break;
9602
9603                 cursize <<= 1;
9604         }
9605
9606         tp->nvram_size = cursize;
9607 }
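/*
 * Illustrative sketch (not part of the driver): why the power-of-two walk
 * above terminates.  Model an EEPROM whose address lines wrap, so any read
 * past the real part size aliases back to the start; the first offset that
 * returns the magic word stored at offset 0 is therefore the part size
 * itself.  The simulated part size is an arbitrary power of two.
 */
static u32 tg3_sketch_eeprom_read(u32 real_size, u32 offset)
{
        /* Word 0 holds the magic; every other word reads as zero here. */
        return ((offset & (real_size - 1)) == 0) ? TG3_EEPROM_MAGIC : 0;
}

static u32 tg3_sketch_eeprom_size(u32 real_size, u32 max_size)
{
        u32 magic = tg3_sketch_eeprom_read(real_size, 0);
        u32 cursize = 0x10;

        while (cursize < max_size &&
               tg3_sketch_eeprom_read(real_size, cursize) != magic)
                cursize <<= 1;

        return cursize;         /* equals real_size for a power-of-two part */
}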
9608
9609 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9610 {
9611         u32 val;
9612
9613         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9614                 return;
9615
9616         /* Selfboot format */
9617         if (val != TG3_EEPROM_MAGIC) {
9618                 tg3_get_eeprom_size(tp);
9619                 return;
9620         }
9621
9622         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9623                 if (val != 0) {
9624                         tp->nvram_size = (val >> 16) * 1024;
9625                         return;
9626                 }
9627         }
9628         tp->nvram_size = 0x80000;
9629 }
9630
9631 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9632 {
9633         u32 nvcfg1;
9634
9635         nvcfg1 = tr32(NVRAM_CFG1);
9636         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9637                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9638         }
9639         else {
9640                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9641                 tw32(NVRAM_CFG1, nvcfg1);
9642         }
9643
9644         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9645             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9646                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9647                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9648                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9649                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9650                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9651                                 break;
9652                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9653                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9654                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9655                                 break;
9656                         case FLASH_VENDOR_ATMEL_EEPROM:
9657                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9658                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9659                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9660                                 break;
9661                         case FLASH_VENDOR_ST:
9662                                 tp->nvram_jedecnum = JEDEC_ST;
9663                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9664                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9665                                 break;
9666                         case FLASH_VENDOR_SAIFUN:
9667                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9668                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9669                                 break;
9670                         case FLASH_VENDOR_SST_SMALL:
9671                         case FLASH_VENDOR_SST_LARGE:
9672                                 tp->nvram_jedecnum = JEDEC_SST;
9673                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9674                                 break;
9675                 }
9676         }
9677         else {
9678                 tp->nvram_jedecnum = JEDEC_ATMEL;
9679                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9680                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9681         }
9682 }
9683
9684 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9685 {
9686         u32 nvcfg1;
9687
9688         nvcfg1 = tr32(NVRAM_CFG1);
9689
9690         /* NVRAM protection for TPM */
9691         if (nvcfg1 & (1 << 27))
9692                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9693
9694         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9695                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9696                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9697                         tp->nvram_jedecnum = JEDEC_ATMEL;
9698                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9699                         break;
9700                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9701                         tp->nvram_jedecnum = JEDEC_ATMEL;
9702                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9703                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9704                         break;
9705                 case FLASH_5752VENDOR_ST_M45PE10:
9706                 case FLASH_5752VENDOR_ST_M45PE20:
9707                 case FLASH_5752VENDOR_ST_M45PE40:
9708                         tp->nvram_jedecnum = JEDEC_ST;
9709                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9710                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9711                         break;
9712         }
9713
9714         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9715                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9716                         case FLASH_5752PAGE_SIZE_256:
9717                                 tp->nvram_pagesize = 256;
9718                                 break;
9719                         case FLASH_5752PAGE_SIZE_512:
9720                                 tp->nvram_pagesize = 512;
9721                                 break;
9722                         case FLASH_5752PAGE_SIZE_1K:
9723                                 tp->nvram_pagesize = 1024;
9724                                 break;
9725                         case FLASH_5752PAGE_SIZE_2K:
9726                                 tp->nvram_pagesize = 2048;
9727                                 break;
9728                         case FLASH_5752PAGE_SIZE_4K:
9729                                 tp->nvram_pagesize = 4096;
9730                                 break;
9731                         case FLASH_5752PAGE_SIZE_264:
9732                                 tp->nvram_pagesize = 264;
9733                                 break;
9734                 }
9735         }
9736         else {
9737                 /* For eeprom, set pagesize to maximum eeprom size */
9738                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9739
9740                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9741                 tw32(NVRAM_CFG1, nvcfg1);
9742         }
9743 }
9744
9745 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9746 {
9747         u32 nvcfg1, protect = 0;
9748
9749         nvcfg1 = tr32(NVRAM_CFG1);
9750
9751         /* NVRAM protection for TPM */
9752         if (nvcfg1 & (1 << 27)) {
9753                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9754                 protect = 1;
9755         }
9756
9757         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9758         switch (nvcfg1) {
9759                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9760                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9761                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9762                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9763                         tp->nvram_jedecnum = JEDEC_ATMEL;
9764                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9765                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9766                         tp->nvram_pagesize = 264;
9767                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9768                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9769                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9770                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9771                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9772                         else
9773                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9774                         break;
9775                 case FLASH_5752VENDOR_ST_M45PE10:
9776                 case FLASH_5752VENDOR_ST_M45PE20:
9777                 case FLASH_5752VENDOR_ST_M45PE40:
9778                         tp->nvram_jedecnum = JEDEC_ST;
9779                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9780                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9781                         tp->nvram_pagesize = 256;
9782                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9783                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9784                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9785                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9786                         else
9787                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9788                         break;
9789         }
9790 }
9791
9792 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9793 {
9794         u32 nvcfg1;
9795
9796         nvcfg1 = tr32(NVRAM_CFG1);
9797
9798         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9799                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9800                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9801                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9802                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9803                         tp->nvram_jedecnum = JEDEC_ATMEL;
9804                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9805                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9806
9807                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9808                         tw32(NVRAM_CFG1, nvcfg1);
9809                         break;
9810                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9811                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9812                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9813                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9814                         tp->nvram_jedecnum = JEDEC_ATMEL;
9815                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9816                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9817                         tp->nvram_pagesize = 264;
9818                         break;
9819                 case FLASH_5752VENDOR_ST_M45PE10:
9820                 case FLASH_5752VENDOR_ST_M45PE20:
9821                 case FLASH_5752VENDOR_ST_M45PE40:
9822                         tp->nvram_jedecnum = JEDEC_ST;
9823                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9824                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9825                         tp->nvram_pagesize = 256;
9826                         break;
9827         }
9828 }
9829
9830 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
9831 {
9832         u32 nvcfg1, protect = 0;
9833
9834         nvcfg1 = tr32(NVRAM_CFG1);
9835
9836         /* NVRAM protection for TPM */
9837         if (nvcfg1 & (1 << 27)) {
9838                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9839                 protect = 1;
9840         }
9841
9842         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9843         switch (nvcfg1) {
9844                 case FLASH_5761VENDOR_ATMEL_ADB021D:
9845                 case FLASH_5761VENDOR_ATMEL_ADB041D:
9846                 case FLASH_5761VENDOR_ATMEL_ADB081D:
9847                 case FLASH_5761VENDOR_ATMEL_ADB161D:
9848                 case FLASH_5761VENDOR_ATMEL_MDB021D:
9849                 case FLASH_5761VENDOR_ATMEL_MDB041D:
9850                 case FLASH_5761VENDOR_ATMEL_MDB081D:
9851                 case FLASH_5761VENDOR_ATMEL_MDB161D:
9852                         tp->nvram_jedecnum = JEDEC_ATMEL;
9853                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9854                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9855                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
9856                         tp->nvram_pagesize = 256;
9857                         break;
9858                 case FLASH_5761VENDOR_ST_A_M45PE20:
9859                 case FLASH_5761VENDOR_ST_A_M45PE40:
9860                 case FLASH_5761VENDOR_ST_A_M45PE80:
9861                 case FLASH_5761VENDOR_ST_A_M45PE16:
9862                 case FLASH_5761VENDOR_ST_M_M45PE20:
9863                 case FLASH_5761VENDOR_ST_M_M45PE40:
9864                 case FLASH_5761VENDOR_ST_M_M45PE80:
9865                 case FLASH_5761VENDOR_ST_M_M45PE16:
9866                         tp->nvram_jedecnum = JEDEC_ST;
9867                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9868                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9869                         tp->nvram_pagesize = 256;
9870                         break;
9871         }
9872
9873         if (protect) {
9874                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
9875         } else {
9876                 switch (nvcfg1) {
9877                         case FLASH_5761VENDOR_ATMEL_ADB161D:
9878                         case FLASH_5761VENDOR_ATMEL_MDB161D:
9879                         case FLASH_5761VENDOR_ST_A_M45PE16:
9880                         case FLASH_5761VENDOR_ST_M_M45PE16:
9881                                 tp->nvram_size = 0x100000;
9882                                 break;
9883                         case FLASH_5761VENDOR_ATMEL_ADB081D:
9884                         case FLASH_5761VENDOR_ATMEL_MDB081D:
9885                         case FLASH_5761VENDOR_ST_A_M45PE80:
9886                         case FLASH_5761VENDOR_ST_M_M45PE80:
9887                                 tp->nvram_size = 0x80000;
9888                                 break;
9889                         case FLASH_5761VENDOR_ATMEL_ADB041D:
9890                         case FLASH_5761VENDOR_ATMEL_MDB041D:
9891                         case FLASH_5761VENDOR_ST_A_M45PE40:
9892                         case FLASH_5761VENDOR_ST_M_M45PE40:
9893                                 tp->nvram_size = 0x40000;
9894                                 break;
9895                         case FLASH_5761VENDOR_ATMEL_ADB021D:
9896                         case FLASH_5761VENDOR_ATMEL_MDB021D:
9897                         case FLASH_5761VENDOR_ST_A_M45PE20:
9898                         case FLASH_5761VENDOR_ST_M_M45PE20:
9899                                 tp->nvram_size = 0x20000;
9900                                 break;
9901                 }
9902         }
9903 }
9904
9905 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9906 {
9907         tp->nvram_jedecnum = JEDEC_ATMEL;
9908         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9909         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9910 }
9911
9912 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9913 static void __devinit tg3_nvram_init(struct tg3 *tp)
9914 {
9915         tw32_f(GRC_EEPROM_ADDR,
9916              (EEPROM_ADDR_FSM_RESET |
9917               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9918                EEPROM_ADDR_CLKPERD_SHIFT)));
9919
9920         msleep(1);
9921
9922         /* Enable seeprom accesses. */
9923         tw32_f(GRC_LOCAL_CTRL,
9924              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9925         udelay(100);
9926
9927         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9928             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9929                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9930
9931                 if (tg3_nvram_lock(tp)) {
9932                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9933                                "tg3_nvram_init failed.\n", tp->dev->name);
9934                         return;
9935                 }
9936                 tg3_enable_nvram_access(tp);
9937
9938                 tp->nvram_size = 0;
9939
9940                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9941                         tg3_get_5752_nvram_info(tp);
9942                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9943                         tg3_get_5755_nvram_info(tp);
9944                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9945                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
9946                         tg3_get_5787_nvram_info(tp);
9947                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9948                         tg3_get_5761_nvram_info(tp);
9949                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9950                         tg3_get_5906_nvram_info(tp);
9951                 else
9952                         tg3_get_nvram_info(tp);
9953
9954                 if (tp->nvram_size == 0)
9955                         tg3_get_nvram_size(tp);
9956
9957                 tg3_disable_nvram_access(tp);
9958                 tg3_nvram_unlock(tp);
9959
9960         } else {
9961                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9962
9963                 tg3_get_eeprom_size(tp);
9964         }
9965 }
9966
9967 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9968                                         u32 offset, u32 *val)
9969 {
9970         u32 tmp;
9971         int i;
9972
9973         if (offset > EEPROM_ADDR_ADDR_MASK ||
9974             (offset % 4) != 0)
9975                 return -EINVAL;
9976
9977         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9978                                         EEPROM_ADDR_DEVID_MASK |
9979                                         EEPROM_ADDR_READ);
9980         tw32(GRC_EEPROM_ADDR,
9981              tmp |
9982              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9983              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9984               EEPROM_ADDR_ADDR_MASK) |
9985              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9986
9987         for (i = 0; i < 1000; i++) {
9988                 tmp = tr32(GRC_EEPROM_ADDR);
9989
9990                 if (tmp & EEPROM_ADDR_COMPLETE)
9991                         break;
9992                 msleep(1);
9993         }
9994         if (!(tmp & EEPROM_ADDR_COMPLETE))
9995                 return -EBUSY;
9996
9997         *val = tr32(GRC_EEPROM_DATA);
9998         return 0;
9999 }
10000
10001 #define NVRAM_CMD_TIMEOUT 10000
10002
10003 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10004 {
10005         int i;
10006
10007         tw32(NVRAM_CMD, nvram_cmd);
10008         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10009                 udelay(10);
10010                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10011                         udelay(10);
10012                         break;
10013                 }
10014         }
10015         if (i == NVRAM_CMD_TIMEOUT) {
10016                 return -EBUSY;
10017         }
10018         return 0;
10019 }
10020
10021 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10022 {
10023         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10024             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10025             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10026            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10027             (tp->nvram_jedecnum == JEDEC_ATMEL))
10028
10029                 addr = ((addr / tp->nvram_pagesize) <<
10030                         ATMEL_AT45DB0X1B_PAGE_POS) +
10031                        (addr % tp->nvram_pagesize);
10032
10033         return addr;
10034 }
10035
10036 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10037 {
10038         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10039             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10040             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10041            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10042             (tp->nvram_jedecnum == JEDEC_ATMEL))
10043
10044                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10045                         tp->nvram_pagesize) +
10046                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10047
10048         return addr;
10049 }
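/*
 * Illustrative sketch (not part of the driver): the address split handled
 * by the two helpers above.  Atmel AT45DB parts use 264-byte pages
 * addressed as (page << ATMEL_AT45DB0X1B_PAGE_POS) + byte-in-page rather
 * than a flat byte offset, so a linear NVRAM offset of 1000 maps to page 3,
 * byte 208, and converting back recovers the original offset.  The 264-byte
 * page size matches the value programmed for these parts elsewhere in this
 * file; the offset 1000 is an arbitrary example.
 */
static u32 tg3_sketch_at45db_roundtrip(void)
{
        u32 pagesize = 264;
        u32 linear = 1000;
        u32 page = linear / pagesize;                   /* 3 */
        u32 byte = linear % pagesize;                   /* 208 */
        u32 phys = (page << ATMEL_AT45DB0X1B_PAGE_POS) + byte;
        u32 back = ((phys >> ATMEL_AT45DB0X1B_PAGE_POS) * pagesize) +
                   (phys & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

        return back;            /* == 1000, the original linear offset */
}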
10050
10051 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10052 {
10053         int ret;
10054
10055         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10056                 return tg3_nvram_read_using_eeprom(tp, offset, val);
10057
10058         offset = tg3_nvram_phys_addr(tp, offset);
10059
10060         if (offset > NVRAM_ADDR_MSK)
10061                 return -EINVAL;
10062
10063         ret = tg3_nvram_lock(tp);
10064         if (ret)
10065                 return ret;
10066
10067         tg3_enable_nvram_access(tp);
10068
10069         tw32(NVRAM_ADDR, offset);
10070         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10071                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10072
10073         if (ret == 0)
10074                 *val = swab32(tr32(NVRAM_RDDATA));
10075
10076         tg3_disable_nvram_access(tp);
10077
10078         tg3_nvram_unlock(tp);
10079
10080         return ret;
10081 }
10082
10083 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10084 {
10085         int err;
10086         u32 tmp;
10087
10088         err = tg3_nvram_read(tp, offset, &tmp);
10089         *val = swab32(tmp);
10090         return err;
10091 }
10092
10093 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10094                                     u32 offset, u32 len, u8 *buf)
10095 {
10096         int i, j, rc = 0;
10097         u32 val;
10098
10099         for (i = 0; i < len; i += 4) {
10100                 u32 addr, data;
10101
10102                 addr = offset + i;
10103
10104                 memcpy(&data, buf + i, 4);
10105
10106                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
10107
10108                 val = tr32(GRC_EEPROM_ADDR);
10109                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10110
10111                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10112                         EEPROM_ADDR_READ);
10113                 tw32(GRC_EEPROM_ADDR, val |
10114                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
10115                         (addr & EEPROM_ADDR_ADDR_MASK) |
10116                         EEPROM_ADDR_START |
10117                         EEPROM_ADDR_WRITE);
10118
10119                 for (j = 0; j < 1000; j++) {
10120                         val = tr32(GRC_EEPROM_ADDR);
10121
10122                         if (val & EEPROM_ADDR_COMPLETE)
10123                                 break;
10124                         msleep(1);
10125                 }
10126                 if (!(val & EEPROM_ADDR_COMPLETE)) {
10127                         rc = -EBUSY;
10128                         break;
10129                 }
10130         }
10131
10132         return rc;
10133 }
10134
10135 /* offset and length are dword aligned */
10136 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10137                 u8 *buf)
10138 {
10139         int ret = 0;
10140         u32 pagesize = tp->nvram_pagesize;
10141         u32 pagemask = pagesize - 1;
10142         u32 nvram_cmd;
10143         u8 *tmp;
10144
10145         tmp = kmalloc(pagesize, GFP_KERNEL);
10146         if (tmp == NULL)
10147                 return -ENOMEM;
10148
10149         while (len) {
10150                 int j;
10151                 u32 phy_addr, page_off, size;
10152
10153                 phy_addr = offset & ~pagemask;
10154
10155                 for (j = 0; j < pagesize; j += 4) {
10156                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
10157                                                 (u32 *) (tmp + j))))
10158                                 break;
10159                 }
10160                 if (ret)
10161                         break;
10162
10163                 page_off = offset & pagemask;
10164                 size = pagesize;
10165                 if (len < size)
10166                         size = len;
10167
10168                 len -= size;
10169
10170                 memcpy(tmp + page_off, buf, size);
10171
10172                 offset = offset + (pagesize - page_off);
10173
10174                 tg3_enable_nvram_access(tp);
10175
10176                 /*
10177                  * Before we can erase the flash page, we need
10178                  * to issue a special "write enable" command.
10179                  */
10180                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10181
10182                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10183                         break;
10184
10185                 /* Erase the target page */
10186                 tw32(NVRAM_ADDR, phy_addr);
10187
10188                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10189                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10190
10191                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10192                         break;
10193
10194                 /* Issue another write enable to start the write. */
10195                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10196
10197                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10198                         break;
10199
10200                 for (j = 0; j < pagesize; j += 4) {
10201                         u32 data;
10202
10203                         data = *((u32 *) (tmp + j));
10204                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
10205
10206                         tw32(NVRAM_ADDR, phy_addr + j);
10207
10208                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10209                                 NVRAM_CMD_WR;
10210
10211                         if (j == 0)
10212                                 nvram_cmd |= NVRAM_CMD_FIRST;
10213                         else if (j == (pagesize - 4))
10214                                 nvram_cmd |= NVRAM_CMD_LAST;
10215
10216                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10217                                 break;
10218                 }
10219                 if (ret)
10220                         break;
10221         }
10222
10223         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10224         tg3_nvram_exec_cmd(tp, nvram_cmd);
10225
10226         kfree(tmp);
10227
10228         return ret;
10229 }
10230
10231 /* offset and length are dword aligned */
10232 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10233                 u8 *buf)
10234 {
10235         int i, ret = 0;
10236
10237         for (i = 0; i < len; i += 4, offset += 4) {
10238                 u32 data, page_off, phy_addr, nvram_cmd;
10239
10240                 memcpy(&data, buf + i, 4);
10241                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
10242
10243                 page_off = offset % tp->nvram_pagesize;
10244
10245                 phy_addr = tg3_nvram_phys_addr(tp, offset);
10246
10247                 tw32(NVRAM_ADDR, phy_addr);
10248
10249                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10250
10251                 if ((page_off == 0) || (i == 0))
10252                         nvram_cmd |= NVRAM_CMD_FIRST;
10253                 if (page_off == (tp->nvram_pagesize - 4))
10254                         nvram_cmd |= NVRAM_CMD_LAST;
10255
10256                 if (i == (len - 4))
10257                         nvram_cmd |= NVRAM_CMD_LAST;
10258
10259                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
10260                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10261                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10262                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10263                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
10264                     (tp->nvram_jedecnum == JEDEC_ST) &&
10265                     (nvram_cmd & NVRAM_CMD_FIRST)) {
10266
10267                         if ((ret = tg3_nvram_exec_cmd(tp,
10268                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10269                                 NVRAM_CMD_DONE)))
10270
10271                                 break;
10272                 }
10273                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10274                         /* We always do complete word writes to eeprom. */
10275                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10276                 }
10277
10278                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10279                         break;
10280         }
10281         return ret;
10282 }
10283
10284 /* offset and length are dword aligned */
10285 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10286 {
10287         int ret;
10288
10289         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10290                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10291                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10292                 udelay(40);
10293         }
10294
10295         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10296                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10297         }
10298         else {
10299                 u32 grc_mode;
10300
10301                 ret = tg3_nvram_lock(tp);
10302                 if (ret)
10303                         return ret;
10304
10305                 tg3_enable_nvram_access(tp);
10306                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10307                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10308                         tw32(NVRAM_WRITE1, 0x406);
10309
10310                 grc_mode = tr32(GRC_MODE);
10311                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10312
10313                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10314                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10315
10316                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10317                                 buf);
10318                 }
10319                 else {
10320                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10321                                 buf);
10322                 }
10323
10324                 grc_mode = tr32(GRC_MODE);
10325                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10326
10327                 tg3_disable_nvram_access(tp);
10328                 tg3_nvram_unlock(tp);
10329         }
10330
10331         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10332                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10333                 udelay(40);
10334         }
10335
10336         return ret;
10337 }
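/*
 * Illustrative sketch (not part of the driver): calling convention for
 * tg3_nvram_write_block() above.  Both offset and length must be dword
 * aligned and the data is passed as a byte buffer; the 0x80 offset and the
 * two payload words are arbitrary example values, and actually writing
 * NVRAM on a live board can render it unbootable, so this is illustration
 * only.
 */
static int tg3_sketch_nvram_write(struct tg3 *tp)
{
        u32 words[2] = { 0x12345678, 0x9abcdef0 };      /* 8 aligned bytes */

        return tg3_nvram_write_block(tp, 0x80, sizeof(words), (u8 *)words);
}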
10338
10339 struct subsys_tbl_ent {
10340         u16 subsys_vendor, subsys_devid;
10341         u32 phy_id;
10342 };
10343
10344 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10345         /* Broadcom boards. */
10346         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10347         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10348         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10349         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
10350         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10351         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10352         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
10353         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10354         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10355         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10356         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10357
10358         /* 3com boards. */
10359         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10360         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10361         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
10362         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10363         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10364
10365         /* DELL boards. */
10366         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10367         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10368         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10369         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10370
10371         /* Compaq boards. */
10372         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10373         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10374         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
10375         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10376         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10377
10378         /* IBM boards. */
10379         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10380 };
10381
10382 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10383 {
10384         int i;
10385
10386         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10387                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10388                      tp->pdev->subsystem_vendor) &&
10389                     (subsys_id_to_phy_id[i].subsys_devid ==
10390                      tp->pdev->subsystem_device))
10391                         return &subsys_id_to_phy_id[i];
10392         }
10393         return NULL;
10394 }
10395
10396 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10397 {
10398         u32 val;
10399         u16 pmcsr;
10400
10401         /* On some early chips the SRAM cannot be accessed in D3hot state,
10402          * so we need to make sure we're in D0.
10403          */
10404         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10405         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10406         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10407         msleep(1);
10408
10409         /* Make sure register accesses (indirect or otherwise)
10410          * will function correctly.
10411          */
10412         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10413                                tp->misc_host_ctrl);
10414
10415         /* The memory arbiter has to be enabled in order for SRAM accesses
10416          * to succeed.  Normally on powerup the tg3 chip firmware will make
10417          * sure it is enabled, but other entities such as system netboot
10418          * code might disable it.
10419          */
10420         val = tr32(MEMARB_MODE);
10421         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10422
10423         tp->phy_id = PHY_ID_INVALID;
10424         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10425
10426         /* Assume an onboard device and WOL capable by default.  */
10427         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10428
10429         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10430                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10431                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10432                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10433                 }
10434                 val = tr32(VCPU_CFGSHDW);
10435                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10436                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10437                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10438                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10439                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10440                 return;
10441         }
10442
10443         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10444         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10445                 u32 nic_cfg, led_cfg;
10446                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10447                 int eeprom_phy_serdes = 0;
10448
10449                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10450                 tp->nic_sram_data_cfg = nic_cfg;
10451
10452                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10453                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10454                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10455                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10456                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10457                     (ver > 0) && (ver < 0x100))
10458                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10459
10460                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10461                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10462                         eeprom_phy_serdes = 1;
10463
10464                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10465                 if (nic_phy_id != 0) {
10466                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10467                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10468
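                              /* The SRAM stores the PHY ID as two 16-bit halves.
                               * Repack them into the driver's internal PHY ID
                               * layout (the same one tg3_phy_probe() builds from
                               * the MII PHYSID1/PHYSID2 registers) so the two
                               * values can be compared directly.
                               */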
10469                         eeprom_phy_id  = (id1 >> 16) << 10;
10470                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10471                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10472                 } else
10473                         eeprom_phy_id = 0;
10474
10475                 tp->phy_id = eeprom_phy_id;
10476                 if (eeprom_phy_serdes) {
10477                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10478                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10479                         else
10480                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10481                 }
10482
10483                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10484                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10485                                     SHASTA_EXT_LED_MODE_MASK);
10486                 else
10487                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10488
10489                 switch (led_cfg) {
10490                 default:
10491                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10492                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10493                         break;
10494
10495                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10496                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10497                         break;
10498
10499                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10500                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10501
10502                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10503                          * read with some older 5700/5701 bootcode.
10504                          */
10505                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10506                             ASIC_REV_5700 ||
10507                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10508                             ASIC_REV_5701)
10509                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10510
10511                         break;
10512
10513                 case SHASTA_EXT_LED_SHARED:
10514                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10515                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10516                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10517                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10518                                                  LED_CTRL_MODE_PHY_2);
10519                         break;
10520
10521                 case SHASTA_EXT_LED_MAC:
10522                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10523                         break;
10524
10525                 case SHASTA_EXT_LED_COMBO:
10526                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10527                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10528                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10529                                                  LED_CTRL_MODE_PHY_2);
10530                         break;
10531
10532                 }
10533
10534                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10535                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10536                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10537                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10538
10539                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10540                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10541                         if ((tp->pdev->subsystem_vendor ==
10542                              PCI_VENDOR_ID_ARIMA) &&
10543                             (tp->pdev->subsystem_device == 0x205a ||
10544                              tp->pdev->subsystem_device == 0x2063))
10545                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10546                 } else {
10547                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10548                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10549                 }
10550
10551                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10552                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10553                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10554                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10555                 }
10556                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10557                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10558                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10559                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10560                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10561
10562                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10563                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10564                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10565
10566                 if (cfg2 & (1 << 17))
10567                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10568
10569                 /* SerDes signal pre-emphasis in register 0x590 is set by
10570                  * the bootcode if bit 18 is set.  */
10571                 if (cfg2 & (1 << 18))
10572                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10573
10574                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10575                         u32 cfg3;
10576
10577                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10578                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10579                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10580                 }
10581         }
10582 }
10583
10584 static int __devinit tg3_phy_probe(struct tg3 *tp)
10585 {
10586         u32 hw_phy_id_1, hw_phy_id_2;
10587         u32 hw_phy_id, hw_phy_id_masked;
10588         int err;
10589
10590         /* Reading the PHY ID register can conflict with ASF
10591          * firmware access to the PHY hardware.
10592          */
10593         err = 0;
10594         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10595             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
10596                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10597         } else {
10598                 /* Now read the physical PHY_ID from the chip and verify
10599                  * that it is sane.  If it doesn't look good, we fall back
10600                  * to the PHY_ID found in the eeprom area, and failing
10601                  * that to the hard-coded subsystem ID table.
10602                  */
10603                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10604                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10605
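                      /* Combine MII_PHYSID1/PHYSID2 into the driver's internal
                       * PHY ID layout.  PHY_ID_MASK below strips the low
                       * revision bits so that known PHY types match regardless
                       * of silicon revision.
                       */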
10606                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
10607                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10608                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
10609
10610                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10611         }
10612
10613         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10614                 tp->phy_id = hw_phy_id;
10615                 if (hw_phy_id_masked == PHY_ID_BCM8002)
10616                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10617                 else
10618                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10619         } else {
10620                 if (tp->phy_id != PHY_ID_INVALID) {
10621                         /* Do nothing, phy ID already set up in
10622                          * tg3_get_eeprom_hw_cfg().
10623                          */
10624                 } else {
10625                         struct subsys_tbl_ent *p;
10626
10627                         /* No eeprom signature?  Try the hardcoded
10628                          * subsys device table.
10629                          */
10630                         p = lookup_by_subsys(tp);
10631                         if (!p)
10632                                 return -ENODEV;
10633
10634                         tp->phy_id = p->phy_id;
10635                         if (!tp->phy_id ||
10636                             tp->phy_id == PHY_ID_BCM8002)
10637                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10638                 }
10639         }
10640
10641         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10642             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
10643             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10644                 u32 bmsr, adv_reg, tg3_ctrl, mask;
10645
10646                 tg3_readphy(tp, MII_BMSR, &bmsr);
10647                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10648                     (bmsr & BMSR_LSTATUS))
10649                         goto skip_phy_reset;
10650
10651                 err = tg3_phy_reset(tp);
10652                 if (err)
10653                         return err;
10654
10655                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10656                            ADVERTISE_100HALF | ADVERTISE_100FULL |
10657                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10658                 tg3_ctrl = 0;
10659                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10660                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10661                                     MII_TG3_CTRL_ADV_1000_FULL);
10662                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10663                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10664                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10665                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
10666                 }
10667
10668                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10669                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10670                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
10671                 if (!tg3_copper_is_advertising_all(tp, mask)) {
10672                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10673
10674                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10675                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10676
10677                         tg3_writephy(tp, MII_BMCR,
10678                                      BMCR_ANENABLE | BMCR_ANRESTART);
10679                 }
10680                 tg3_phy_set_wirespeed(tp);
10681
10682                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10683                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10684                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10685         }
10686
10687 skip_phy_reset:
10688         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10689                 err = tg3_init_5401phy_dsp(tp);
10690                 if (err)
10691                         return err;
10692         }
10693
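              /* Note: when we get here for a BCM5401, the DSP init above has
               * already run and succeeded, so the call below appears redundant
               * (though harmless).
               */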
10694         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10695                 err = tg3_init_5401phy_dsp(tp);
10696         }
10697
10698         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10699                 tp->link_config.advertising =
10700                         (ADVERTISED_1000baseT_Half |
10701                          ADVERTISED_1000baseT_Full |
10702                          ADVERTISED_Autoneg |
10703                          ADVERTISED_FIBRE);
10704         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10705                 tp->link_config.advertising &=
10706                         ~(ADVERTISED_1000baseT_Half |
10707                           ADVERTISED_1000baseT_Full);
10708
10709         return err;
10710 }
10711
10712 static void __devinit tg3_read_partno(struct tg3 *tp)
10713 {
10714         unsigned char vpd_data[256];
10715         unsigned int i;
10716         u32 magic;
10717
10718         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10719                 goto out_not_found;
10720
10721         if (magic == TG3_EEPROM_MAGIC) {
10722                 for (i = 0; i < 256; i += 4) {
10723                         u32 tmp;
10724
10725                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10726                                 goto out_not_found;
10727
10728                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10729                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10730                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10731                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10732                 }
10733         } else {
10734                 int vpd_cap;
10735
10736                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10737                 for (i = 0; i < 256; i += 4) {
10738                         u32 tmp, j = 0;
10739                         u16 tmp16;
10740
10741                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10742                                               i);
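                              /* Poll for the VPD completion flag (bit 15 of the
                               * VPD address register, PCI_VPD_ADDR_F): the device
                               * sets it once the requested 32-bit word is ready
                               * in PCI_VPD_DATA.
                               */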
10743                         while (j++ < 100) {
10744                                 pci_read_config_word(tp->pdev, vpd_cap +
10745                                                      PCI_VPD_ADDR, &tmp16);
10746                                 if (tmp16 & 0x8000)
10747                                         break;
10748                                 msleep(1);
10749                         }
10750                         if (!(tmp16 & 0x8000))
10751                                 goto out_not_found;
10752
10753                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10754                                               &tmp);
10755                         tmp = cpu_to_le32(tmp);
10756                         memcpy(&vpd_data[i], &tmp, 4);
10757                 }
10758         }
10759
10760         /* Now parse and find the part number. */
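              /* The VPD area is a sequence of tagged resources: 0x82 is the
               * identifier-string tag and 0x91 the read/write tag, each
               * followed by a 16-bit little-endian length, so those blocks are
               * skipped.  0x90 is the read-only tag; its data is a list of
               * keywords, each two ASCII characters (e.g. 'P','N' for the
               * board part number) followed by a one-byte length.
               */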
10761         for (i = 0; i < 254; ) {
10762                 unsigned char val = vpd_data[i];
10763                 unsigned int block_end;
10764
10765                 if (val == 0x82 || val == 0x91) {
10766                         i = (i + 3 +
10767                              (vpd_data[i + 1] +
10768                               (vpd_data[i + 2] << 8)));
10769                         continue;
10770                 }
10771
10772                 if (val != 0x90)
10773                         goto out_not_found;
10774
10775                 block_end = (i + 3 +
10776                              (vpd_data[i + 1] +
10777                               (vpd_data[i + 2] << 8)));
10778                 i += 3;
10779
10780                 if (block_end > 256)
10781                         goto out_not_found;
10782
10783                 while (i < (block_end - 2)) {
10784                         if (vpd_data[i + 0] == 'P' &&
10785                             vpd_data[i + 1] == 'N') {
10786                                 int partno_len = vpd_data[i + 2];
10787
10788                                 i += 3;
10789                                 if (partno_len > 24 || (partno_len + i) > 256)
10790                                         goto out_not_found;
10791
10792                                 memcpy(tp->board_part_number,
10793                                        &vpd_data[i], partno_len);
10794
10795                                 /* Success. */
10796                                 return;
10797                         }
10798                         i += 3 + vpd_data[i + 2];
10799                 }
10800
10801                 /* Part number not found. */
10802                 goto out_not_found;
10803         }
10804
10805 out_not_found:
10806         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10807                 strcpy(tp->board_part_number, "BCM95906");
10808         else
10809                 strcpy(tp->board_part_number, "none");
10810 }
10811
10812 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10813 {
10814         u32 val, offset, start;
10815
10816         if (tg3_nvram_read_swab(tp, 0, &val))
10817                 return;
10818
10819         if (val != TG3_EEPROM_MAGIC)
10820                 return;
10821
10822         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10823             tg3_nvram_read_swab(tp, 0x4, &start))
10824                 return;
10825
10826         offset = tg3_nvram_logical_addr(tp, offset);
10827         if (tg3_nvram_read_swab(tp, offset, &val))
10828                 return;
10829
10830         if ((val & 0xfc000000) == 0x0c000000) {
10831                 u32 ver_offset, addr;
10832                 int i;
10833
10834                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10835                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10836                         return;
10837
10838                 if (val != 0)
10839                         return;
10840
10841                 addr = offset + ver_offset - start;
10842                 for (i = 0; i < 16; i += 4) {
10843                         if (tg3_nvram_read(tp, addr + i, &val))
10844                                 return;
10845
10846                         val = cpu_to_le32(val);
10847                         memcpy(tp->fw_ver + i, &val, 4);
10848                 }
10849         }
10850 }
10851
10852 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10853
10854 static int __devinit tg3_get_invariants(struct tg3 *tp)
10855 {
10856         static struct pci_device_id write_reorder_chipsets[] = {
10857                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10858                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10859                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10860                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10861                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10862                              PCI_DEVICE_ID_VIA_8385_0) },
10863                 { },
10864         };
10865         u32 misc_ctrl_reg;
10866         u32 cacheline_sz_reg;
10867         u32 pci_state_reg, grc_misc_cfg;
10868         u32 val;
10869         u16 pci_cmd;
10870         int err, pcie_cap;
10871
10872         /* Force memory write invalidate off.  If we leave it on,
10873          * then on 5700_BX chips we have to enable a workaround.
10874          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10875          * to match the cacheline size.  The Broadcom driver has this
10876          * workaround but turns MWI off all the time and so never uses
10877          * it.  This seems to suggest that the workaround is insufficient.
10878          */
10879         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10880         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10881         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10882
10883         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10884          * has the register indirect write enable bit set before
10885          * we try to access any of the MMIO registers.  It is also
10886          * critical that the PCI-X hw workaround situation is decided
10887          * before that as well.
10888          */
10889         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10890                               &misc_ctrl_reg);
10891
10892         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10893                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10894         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10895                 u32 prod_id_asic_rev;
10896
10897                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10898                                       &prod_id_asic_rev);
10899                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10900         }
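              /* tp->pci_chip_rev_id packs the ASIC revision in its upper bits;
               * the GET_ASIC_REV()/GET_CHIP_REV() helpers in tg3.h shift those
               * bits down so both individual steppings (CHIPREV_ID_xxxx) and
               * whole chip families can be tested below.
               */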
10901
10902         /* Wrong chip ID in 5752 A0. This code can be removed later
10903          * as A0 is not in production.
10904          */
10905         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10906                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10907
10908         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10909          * we need to disable memory and use config. cycles
10910          * only to access all registers. The 5702/03 chips
10911          * can mistakenly decode the special cycles from the
10912          * ICH chipsets as memory write cycles, causing corruption
10913          * of register and memory space. Only certain ICH bridges
10914          * will drive special cycles with non-zero data during the
10915          * address phase which can fall within the 5703's address
10916          * range. This is not an ICH bug as the PCI spec allows
10917          * non-zero address during special cycles. However, only
10918          * these ICH bridges are known to drive non-zero addresses
10919          * during special cycles.
10920          *
10921          * Since special cycles do not cross PCI bridges, we only
10922          * enable this workaround if the 5703 is on the secondary
10923          * bus of these ICH bridges.
10924          */
10925         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10926             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10927                 static struct tg3_dev_id {
10928                         u32     vendor;
10929                         u32     device;
10930                         u32     rev;
10931                 } ich_chipsets[] = {
10932                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10933                           PCI_ANY_ID },
10934                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10935                           PCI_ANY_ID },
10936                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10937                           0xa },
10938                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10939                           PCI_ANY_ID },
10940                         { },
10941                 };
10942                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10943                 struct pci_dev *bridge = NULL;
10944
10945                 while (pci_id->vendor != 0) {
10946                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10947                                                 bridge);
10948                         if (!bridge) {
10949                                 pci_id++;
10950                                 continue;
10951                         }
10952                         if (pci_id->rev != PCI_ANY_ID) {
10953                                 if (bridge->revision > pci_id->rev)
10954                                         continue;
10955                         }
10956                         if (bridge->subordinate &&
10957                             (bridge->subordinate->number ==
10958                              tp->pdev->bus->number)) {
10959
10960                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10961                                 pci_dev_put(bridge);
10962                                 break;
10963                         }
10964                 }
10965         }
10966
10967         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10968          * DMA addresses > 40-bit. This bridge may have other additional
10969          * 57xx devices behind it in some 4-port NIC designs for example.
10970          * Any tg3 device found behind the bridge will also need the 40-bit
10971          * DMA workaround.
10972          */
10973         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10974             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10975                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10976                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10977                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10978         }
10979         else {
10980                 struct pci_dev *bridge = NULL;
10981
10982                 do {
10983                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10984                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10985                                                 bridge);
10986                         if (bridge && bridge->subordinate &&
10987                             (bridge->subordinate->number <=
10988                              tp->pdev->bus->number) &&
10989                             (bridge->subordinate->subordinate >=
10990                              tp->pdev->bus->number)) {
10991                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10992                                 pci_dev_put(bridge);
10993                                 break;
10994                         }
10995                 } while (bridge);
10996         }
10997
10998         /* Initialize misc host control in PCI block. */
10999         tp->misc_host_ctrl |= (misc_ctrl_reg &
11000                                MISC_HOST_CTRL_CHIPREV);
11001         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11002                                tp->misc_host_ctrl);
11003
11004         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11005                               &cacheline_sz_reg);
11006
11007         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11008         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11009         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11010         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
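              /* TG3PCI_CACHELINESZ aliases the standard PCI config dword at
               * offset 0x0c, so the four bytes unpacked above are the usual
               * cache-line-size, latency-timer, header-type and BIST fields.
               */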
11011
11012         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11013             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11014                 tp->pdev_peer = tg3_find_peer(tp);
11015
11016         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11017             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11018             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11019             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11020             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11021             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11022             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11023             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11024                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11025
11026         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11027             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11028                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
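              /* 5750_PLUS and 5705_PLUS are cumulative family flags: everything
               * in the 5750+ set is also treated as 5705+, so later code can
               * test a single umbrella flag instead of enumerating ASIC revs.
               */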
11029
11030         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11031                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11032                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11033                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11034                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11035                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11036                      tp->pdev_peer == tp->pdev))
11037                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11038
11039                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11040                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11041                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11042                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11043                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11044                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11045                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11046                 } else {
11047                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11048                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11049                                 ASIC_REV_5750 &&
11050                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11051                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11052                 }
11053         }
11054
11055         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11056             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11057             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11058             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11059             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11060             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11061             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11062             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11063                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11064
11065         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11066         if (pcie_cap != 0) {
11067                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11068                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11069                         u16 lnkctl;
11070
11071                         pci_read_config_word(tp->pdev,
11072                                              pcie_cap + PCI_EXP_LNKCTL,
11073                                              &lnkctl);
11074                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11075                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11076                 }
11077         }
11078
11079         /* If we have an AMD 762 or VIA K8T800 chipset, write
11080          * reordering to the mailbox registers done by the host
11081          * controller can cause serious problems.  We read back from
11082          * every mailbox register write to force the writes to be
11083          * posted to the chip in order.
11084          */
11085         if (pci_dev_present(write_reorder_chipsets) &&
11086             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11087                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11088
11089         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11090             tp->pci_lat_timer < 64) {
11091                 tp->pci_lat_timer = 64;
11092
11093                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11094                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11095                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11096                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11097
11098                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11099                                        cacheline_sz_reg);
11100         }
11101
11102         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11103             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11104                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11105                 if (!tp->pcix_cap) {
11106                         printk(KERN_ERR PFX "Cannot find PCI-X "
11107                                             "capability, aborting.\n");
11108                         return -EIO;
11109                 }
11110         }
11111
11112         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11113                               &pci_state_reg);
11114
11115         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11116                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11117
11118                 /* If this is a 5700 BX chipset, and we are in PCI-X
11119                  * mode, enable register write workaround.
11120                  *
11121                  * The workaround is to use indirect register accesses
11122                  * for all chip writes not to mailbox registers.
11123                  */
11124                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11125                         u32 pm_reg;
11126
11127                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11128
11129                         /* The chip can have its power management PCI config
11130                          * space registers clobbered due to this bug.
11131                          * So explicitly force the chip into D0 here.
11132                          */
11133                         pci_read_config_dword(tp->pdev,
11134                                               tp->pm_cap + PCI_PM_CTRL,
11135                                               &pm_reg);
11136                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11137                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11138                         pci_write_config_dword(tp->pdev,
11139                                                tp->pm_cap + PCI_PM_CTRL,
11140                                                pm_reg);
11141
11142                         /* Also, force SERR#/PERR# in PCI command. */
11143                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11144                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11145                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11146                 }
11147         }
11148
11149         /* 5700 BX chips need to have their TX producer index mailboxes
11150          * written twice to work around a bug.
11151          */
11152         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11153                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11154
11155         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11156                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11157         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11158                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11159
11160         /* Chip-specific fixup from Broadcom driver */
11161         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11162             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11163                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11164                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11165         }
11166
11167         /* Default fast path register access methods */
11168         tp->read32 = tg3_read32;
11169         tp->write32 = tg3_write32;
11170         tp->read32_mbox = tg3_read32;
11171         tp->write32_mbox = tg3_write32;
11172         tp->write32_tx_mbox = tg3_write32;
11173         tp->write32_rx_mbox = tg3_write32;
11174
11175         /* Various workaround register access methods */
11176         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11177                 tp->write32 = tg3_write_indirect_reg32;
11178         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11179                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11180                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11181                 /*
11182                  * Back to back register writes can cause problems on these
11183                  * chips, the workaround is to read back all reg writes
11184                  * except those to mailbox regs.
11185                  *
11186                  * See tg3_write_indirect_reg32().
11187                  */
11188                 tp->write32 = tg3_write_flush_reg32;
11189         }
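              /* Roughly, the flushing variant is just a write followed by a
               * read-back of the same register to force the posted write out:
               *
               *	writel(val, tp->regs + off);
               *	readl(tp->regs + off);
               *
               * (illustrative sketch -- see tg3_write_flush_reg32() for the
               * real helper)
               */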
11190
11191
11192         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11193             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11194                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11195                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11196                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11197         }
11198
11199         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11200                 tp->read32 = tg3_read_indirect_reg32;
11201                 tp->write32 = tg3_write_indirect_reg32;
11202                 tp->read32_mbox = tg3_read_indirect_mbox;
11203                 tp->write32_mbox = tg3_write_indirect_mbox;
11204                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11205                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11206
11207                 iounmap(tp->regs);
11208                 tp->regs = NULL;
11209
11210                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11211                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11212                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11213         }
11214         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11215                 tp->read32_mbox = tg3_read32_mbox_5906;
11216                 tp->write32_mbox = tg3_write32_mbox_5906;
11217                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11218                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11219         }
11220
11221         if (tp->write32 == tg3_write_indirect_reg32 ||
11222             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11223              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11224               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11225                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
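              /* With TG3_FLAG_SRAM_USE_CONFIG set, tg3_read_mem()/tg3_write_mem()
               * go through the PCI config-space memory window
               * (TG3PCI_MEM_WIN_BASE_ADDR / TG3PCI_MEM_WIN_DATA) rather than the
               * direct MMIO aperture.
               */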
11226
11227         /* Get eeprom hw config before calling tg3_set_power_state().
11228          * In particular, the TG3_FLG2_IS_NIC flag must be
11229          * determined before calling tg3_set_power_state() so that
11230          * we know whether or not to switch out of Vaux power.
11231          * When the flag is set, it means that GPIO1 is used for eeprom
11232          * write protect and also implies that it is a LOM where GPIOs
11233          * are not used to switch power.
11234          */
11235         tg3_get_eeprom_hw_cfg(tp);
11236
11237         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11238                 /* Allow reads and writes to the
11239                  * APE register and memory space.
11240                  */
11241                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11242                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11243                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11244                                        pci_state_reg);
11245         }
11246
11247         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11249                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11250
11251         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11252          * GPIO1 driven high will bring 5700's external PHY out of reset.
11253          * It is also used as eeprom write protect on LOMs.
11254          */
11255         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11256         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11257             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11258                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11259                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11260         /* Unused GPIO3 must be driven as output on 5752 because there
11261          * are no pull-up resistors on unused GPIO pins.
11262          */
11263         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11264                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11265
11266         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11267                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11268
11269         /* Force the chip into D0. */
11270         err = tg3_set_power_state(tp, PCI_D0);
11271         if (err) {
11272                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11273                        pci_name(tp->pdev));
11274                 return err;
11275         }
11276
11277         /* 5700 B0 chips do not support checksumming correctly due
11278          * to hardware bugs.
11279          */
11280         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11281                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11282
11283         /* Derive initial jumbo mode from MTU assigned in
11284          * ether_setup() via the alloc_etherdev() call
11285          */
11286         if (tp->dev->mtu > ETH_DATA_LEN &&
11287             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11288                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11289
11290         /* Determine WakeOnLan speed to use. */
11291         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11292             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11293             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11294             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11295                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11296         } else {
11297                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11298         }
11299
11300         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
11301         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11302             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11303              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11304              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11305             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11306             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11307                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11308
11309         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11310             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11311                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11312         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11313                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11314
11315         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11316                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11317                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11318                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11319                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11320                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11321                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11322                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11323                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11324                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11325                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11326                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11327         }
11328
11329         tp->coalesce_mode = 0;
11330         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11331             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11332                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11333
11334         /* Initialize MAC MI mode, polling disabled. */
11335         tw32_f(MAC_MI_MODE, tp->mi_mode);
11336         udelay(80);
11337
11338         /* Initialize data/descriptor byte/word swapping. */
11339         val = tr32(GRC_MODE);
11340         val &= GRC_MODE_HOST_STACKUP;
11341         tw32(GRC_MODE, val | tp->grc_mode);
11342
11343         tg3_switch_clocks(tp);
11344
11345         /* Clear this out for sanity. */
11346         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11347
11348         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11349                               &pci_state_reg);
11350         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11351             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11352                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11353
11354                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11355                     chiprevid == CHIPREV_ID_5701_B0 ||
11356                     chiprevid == CHIPREV_ID_5701_B2 ||
11357                     chiprevid == CHIPREV_ID_5701_B5) {
11358                         void __iomem *sram_base;
11359
11360                         /* Write some dummy words into the SRAM status block
11361                          * area and see if they read back correctly.  If the
11362                          * readback is bad, force-enable the PCI-X workaround.
11363                          */
11364                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11365
11366                         writel(0x00000000, sram_base);
11367                         writel(0x00000000, sram_base + 4);
11368                         writel(0xffffffff, sram_base + 4);
11369                         if (readl(sram_base) != 0x00000000)
11370                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11371                 }
11372         }
11373
11374         udelay(50);
11375         tg3_nvram_init(tp);
11376
11377         grc_misc_cfg = tr32(GRC_MISC_CFG);
11378         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11379
11380         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11381             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11382              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11383                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11384
11385         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11386             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11387                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11388         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11389                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11390                                       HOSTCC_MODE_CLRTICK_TXBD);
11391
11392                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11393                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11394                                        tp->misc_host_ctrl);
11395         }
11396
11397         /* these are limited to 10/100 only */
11398         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11399              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11400             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11401              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11402              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11403               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11404               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11405             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11406              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11407               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11408               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11409             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11410                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11411
11412         err = tg3_phy_probe(tp);
11413         if (err) {
11414                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11415                        pci_name(tp->pdev), err);
11416                 /* ... but do not return immediately ... */
11417         }
11418
11419         tg3_read_partno(tp);
11420         tg3_read_fw_ver(tp);
11421
11422         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11423                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11424         } else {
11425                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11426                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11427                 else
11428                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11429         }
11430
11431         /* 5700 {AX,BX} chips have a broken status block link
11432          * change bit implementation, so we must use the
11433          * status register in those cases.
11434          */
11435         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11436                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11437         else
11438                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11439
11440         /* The led_ctrl is set during tg3_phy_probe, here we might
11441          * have to force the link status polling mechanism based
11442          * upon subsystem IDs.
11443          */
11444         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11445             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11446             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11447                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11448                                   TG3_FLAG_USE_LINKCHG_REG);
11449         }
11450
11451         /* For all SERDES we poll the MAC status register. */
11452         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11453                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11454         else
11455                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11456
11457         /* All chips before 5787 can get confused if TX buffers
11458          * straddle the 4GB address boundary in some cases.
11459          */
11460         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11461             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11462             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11463             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11464             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11465                 tp->dev->hard_start_xmit = tg3_start_xmit;
11466         else
11467                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11468
11469         tp->rx_offset = 2;
11470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11471             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11472                 tp->rx_offset = 0;
11473
11474         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11475
11476         /* Increment the rx prod index on the rx std ring by at most
11477          * 8 for these chips to work around hw errata.
11478          */
11479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11480             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11481             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11482                 tp->rx_std_max_post = 8;
11483
11484         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11485                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11486                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11487
11488         return err;
11489 }
11490
11491 #ifdef CONFIG_SPARC
11492 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11493 {
11494         struct net_device *dev = tp->dev;
11495         struct pci_dev *pdev = tp->pdev;
11496         struct device_node *dp = pci_device_to_OF_node(pdev);
11497         const unsigned char *addr;
11498         int len;
11499
11500         addr = of_get_property(dp, "local-mac-address", &len);
11501         if (addr && len == 6) {
11502                 memcpy(dev->dev_addr, addr, 6);
11503                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11504                 return 0;
11505         }
11506         return -ENODEV;
11507 }
11508
11509 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11510 {
11511         struct net_device *dev = tp->dev;
11512
11513         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11514         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11515         return 0;
11516 }
11517 #endif
11518
11519 static int __devinit tg3_get_device_address(struct tg3 *tp)
11520 {
11521         struct net_device *dev = tp->dev;
11522         u32 hi, lo, mac_offset;
11523         int addr_ok = 0;
11524
11525 #ifdef CONFIG_SPARC
11526         if (!tg3_get_macaddr_sparc(tp))
11527                 return 0;
11528 #endif
11529
11530         mac_offset = 0x7c;
11531         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11532             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11533                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11534                         mac_offset = 0xcc;
11535                 if (tg3_nvram_lock(tp))
11536                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11537                 else
11538                         tg3_nvram_unlock(tp);
11539         }
11540         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11541                 mac_offset = 0x10;
11542
11543         /* First try to get it from MAC address mailbox. */
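              /* 0x484b is ASCII "HK"; the bootcode appears to use it as a
               * "MAC address valid" signature in the high mailbox word.
               */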
11544         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11545         if ((hi >> 16) == 0x484b) {
11546                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11547                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11548
11549                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11550                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11551                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11552                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11553                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11554
11555                 /* Some old bootcode may report a 0 MAC address in SRAM */
11556                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11557         }
11558         if (!addr_ok) {
11559                 /* Next, try NVRAM. */
11560                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11561                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11562                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11563                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11564                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11565                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11566                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11567                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11568                 }
11569                 /* Finally just fetch it out of the MAC control regs. */
11570                 else {
11571                         hi = tr32(MAC_ADDR_0_HIGH);
11572                         lo = tr32(MAC_ADDR_0_LOW);
11573
11574                         dev->dev_addr[5] = lo & 0xff;
11575                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11576                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11577                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11578                         dev->dev_addr[1] = hi & 0xff;
11579                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11580                 }
11581         }
11582
11583         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11584 #ifdef CONFIG_SPARC64
11585                 if (!tg3_get_default_macaddr_sparc(tp))
11586                         return 0;
11587 #endif
11588                 return -EINVAL;
11589         }
11590         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11591         return 0;
11592 }
11593
11594 #define BOUNDARY_SINGLE_CACHELINE       1
11595 #define BOUNDARY_MULTI_CACHELINE        2
11596
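/* Roughly: BOUNDARY_SINGLE_CACHELINE asks the chip to break DMA bursts at
 * every cache line, while BOUNDARY_MULTI_CACHELINE tolerates bursts spanning
 * several lines; which one is wanted depends on the host bridge (see below).
 */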
11597 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11598 {
11599         int cacheline_size;
11600         u8 byte;
11601         int goal;
11602
11603         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11604         if (byte == 0)
11605                 cacheline_size = 1024;
11606         else
11607                 cacheline_size = (int) byte * 4;
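        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the multiply
         * by four; a value of zero usually means the cache line size was never
         * programmed, so assume the 1024-byte worst case.
         */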
11608
11609         /* On 5703 and later chips, the boundary bits have no
11610          * effect.
11611          */
11612         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11613             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11614             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11615                 goto out;
11616
11617 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11618         goal = BOUNDARY_MULTI_CACHELINE;
11619 #else
11620 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11621         goal = BOUNDARY_SINGLE_CACHELINE;
11622 #else
11623         goal = 0;
11624 #endif
11625 #endif
11626
11627         if (!goal)
11628                 goto out;
11629
11630         /* PCI controllers on most RISC systems tend to disconnect
11631          * when a device tries to burst across a cache-line boundary.
11632          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11633          *
11634          * Unfortunately, for PCI-E there are only limited
11635          * write-side controls for this, and thus for reads
11636          * we will still get the disconnects.  We'll also waste
11637          * these PCI cycles for both read and write for chips
11638          * other than 5700 and 5701 which do not implement the
11639          * boundary bits.
11640          */
11641         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11642             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11643                 switch (cacheline_size) {
11644                 case 16:
11645                 case 32:
11646                 case 64:
11647                 case 128:
11648                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11649                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11650                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11651                         } else {
11652                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11653                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11654                         }
11655                         break;
11656
11657                 case 256:
11658                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11659                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11660                         break;
11661
11662                 default:
11663                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11664                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11665                         break;
11666                 }
11667         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11668                 switch (cacheline_size) {
11669                 case 16:
11670                 case 32:
11671                 case 64:
11672                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11673                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11674                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11675                                 break;
11676                         }
11677                         /* fallthrough */
11678                 case 128:
11679                 default:
11680                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11681                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11682                         break;
11683                 }
11684         } else {
11685                 switch (cacheline_size) {
11686                 case 16:
11687                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11688                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11689                                         DMA_RWCTRL_WRITE_BNDRY_16);
11690                                 break;
11691                         }
11692                         /* fallthrough */
11693                 case 32:
11694                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11695                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11696                                         DMA_RWCTRL_WRITE_BNDRY_32);
11697                                 break;
11698                         }
11699                         /* fallthrough */
11700                 case 64:
11701                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11702                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11703                                         DMA_RWCTRL_WRITE_BNDRY_64);
11704                                 break;
11705                         }
11706                         /* fallthrough */
11707                 case 128:
11708                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11709                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11710                                         DMA_RWCTRL_WRITE_BNDRY_128);
11711                                 break;
11712                         }
11713                         /* fallthrough */
11714                 case 256:
11715                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11716                                 DMA_RWCTRL_WRITE_BNDRY_256);
11717                         break;
11718                 case 512:
11719                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11720                                 DMA_RWCTRL_WRITE_BNDRY_512);
11721                         break;
11722                 case 1024:
11723                 default:
11724                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11725                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11726                         break;
11727                 }
11728         }
11729
11730 out:
11731         return val;
11732 }
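/* Illustrative use only (this mirrors what tg3_test_dma() does below): the
 * returned value is folded into the DMA read/write control word and then
 * written to the chip, e.g.
 *
 *      tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 *      tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 */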
11733
11734 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
11735 {
11736         struct tg3_internal_buffer_desc test_desc;
11737         u32 sram_dma_descs;
11738         int i, ret;
11739
11740         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
11741
11742         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
11743         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
11744         tw32(RDMAC_STATUS, 0);
11745         tw32(WDMAC_STATUS, 0);
11746
11747         tw32(BUFMGR_MODE, 0);
11748         tw32(FTQ_RESET, 0);
11749
11750         test_desc.addr_hi = ((u64) buf_dma) >> 32;
11751         test_desc.addr_lo = buf_dma & 0xffffffff;
11752         test_desc.nic_mbuf = 0x00002100;
11753         test_desc.len = size;
11754
11755         /*
11756          * HP ZX1 systems were seeing test failures with 5701 cards running
11757          * at 33MHz the *second* time the tg3 driver was loaded after an
11758          * initial scan.
11759          *
11760          * Broadcom tells me:
11761          *   ...the DMA engine is connected to the GRC block and a DMA
11762          *   reset may affect the GRC block in some unpredictable way...
11763          *   The behavior of resets to individual blocks has not been tested.
11764          *
11765          * Broadcom noted the GRC reset will also reset all sub-components.
11766          */
11767         if (to_device) {
11768                 test_desc.cqid_sqid = (13 << 8) | 2;
11769
11770                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
11771                 udelay(40);
11772         } else {
11773                 test_desc.cqid_sqid = (16 << 8) | 7;
11774
11775                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
11776                 udelay(40);
11777         }
11778         test_desc.flags = 0x00000005;
11779
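        /* Copy the descriptor into NIC SRAM one 32-bit word at a time through
         * the indirect memory window (the TG3PCI_MEM_WIN_BASE_ADDR/DATA config
         * registers), then close the window again.
         */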
11780         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
11781                 u32 val;
11782
11783                 val = *(((u32 *)&test_desc) + i);
11784                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
11785                                        sram_dma_descs + (i * sizeof(u32)));
11786                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
11787         }
11788         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
11789
11790         if (to_device) {
11791                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11792         } else {
11793                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11794         }
11795
11796         ret = -ENODEV;
11797         for (i = 0; i < 40; i++) {
11798                 u32 val;
11799
11800                 if (to_device)
11801                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11802                 else
11803                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11804                 if ((val & 0xffff) == sram_dma_descs) {
11805                         ret = 0;
11806                         break;
11807                 }
11808
11809                 udelay(100);
11810         }
11811
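        /* Still -ENODEV here means the completion FIFO never reported the
         * descriptor back within the ~4ms (40 x 100us) poll window above.
         */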
11812         return ret;
11813 }
11814
11815 #define TEST_BUFFER_SIZE        0x2000
11816
11817 static int __devinit tg3_test_dma(struct tg3 *tp)
11818 {
11819         dma_addr_t buf_dma;
11820         u32 *buf, saved_dma_rwctrl;
11821         int ret;
11822
11823         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11824         if (!buf) {
11825                 ret = -ENOMEM;
11826                 goto out_nofree;
11827         }
11828
11829         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11830                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
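        /* Seed the control word with the default PCI read/write command
         * encodings; the host-specific burst boundary bits are folded in next.
         */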
11831
11832         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11833
11834         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11835                 /* DMA read watermark not used on PCIE */
11836                 tp->dma_rwctrl |= 0x00180000;
11837         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11838                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11839                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11840                         tp->dma_rwctrl |= 0x003f0000;
11841                 else
11842                         tp->dma_rwctrl |= 0x003f000f;
11843         } else {
11844                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11845                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11846                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11847                         u32 read_water = 0x7;
11848
11849                         /* If the 5704 is behind the EPB bridge, we can
11850                          * do the less restrictive ONE_DMA workaround for
11851                          * better performance.
11852                          */
11853                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11854                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11855                                 tp->dma_rwctrl |= 0x8000;
11856                         else if (ccval == 0x6 || ccval == 0x7)
11857                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11858
11859                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
11860                                 read_water = 4;
11861                         /* Set bit 23 to enable PCIX hw bug fix */
11862                         tp->dma_rwctrl |=
11863                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
11864                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
11865                                 (1 << 23);
11866                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11867                         /* 5780 always in PCIX mode */
11868                         tp->dma_rwctrl |= 0x00144000;
11869                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11870                         /* 5714 always in PCIX mode */
11871                         tp->dma_rwctrl |= 0x00148000;
11872                 } else {
11873                         tp->dma_rwctrl |= 0x001b000f;
11874                 }
11875         }
11876
11877         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11878             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11879                 tp->dma_rwctrl &= 0xfffffff0;
11880
11881         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11882             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11883                 /* Remove this if it causes problems for some boards. */
11884                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11885
11886                 /* On 5700/5701 chips, we need to set this bit.
11887                  * Otherwise the chip will issue cacheline transactions
11888                  * to streamable DMA memory with not all the byte
11889                  * enables turned on.  This is an error on several
11890                  * RISC PCI controllers, in particular sparc64.
11891                  *
11892                  * On 5703/5704 chips, this bit has been reassigned
11893                  * a different meaning.  In particular, it is used
11894                  * on those chips to enable a PCI-X workaround.
11895                  */
11896                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11897         }
11898
11899         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11900
11901 #if 0
11902         /* Unneeded, already done by tg3_get_invariants.  */
11903         tg3_switch_clocks(tp);
11904 #endif
11905
11906         ret = 0;
11907         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11908             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11909                 goto out;
11910
11911         /* It is best to perform DMA test with maximum write burst size
11912          * to expose the 5700/5701 write DMA bug.
11913          */
11914         saved_dma_rwctrl = tp->dma_rwctrl;
11915         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11916         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11917
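        /* Bounce a known pattern to the chip and back.  On the first sign of
         * corruption fall back to the 16-byte write boundary and retry; only
         * if corruption persists even then is the test declared a failure.
         */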
11918         while (1) {
11919                 u32 *p = buf, i;
11920
11921                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11922                         p[i] = i;
11923
11924                 /* Send the buffer to the chip. */
11925                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11926                 if (ret) {
11927                         printk(KERN_ERR "tg3_test_dma() write to the device failed, err = %d\n", ret);
11928                         break;
11929                 }
11930
11931 #if 0
11932                 /* validate data reached card RAM correctly. */
11933                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11934                         u32 val;
11935                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11936                         if (le32_to_cpu(val) != p[i]) {
11937                                 printk(KERN_ERR "tg3_test_dma() card buffer corrupted on write! (%u != %u)\n", le32_to_cpu(val), i);
11938                                 /* ret = -ENODEV here? */
11939                         }
11940                         p[i] = 0;
11941                 }
11942 #endif
11943                 /* Now read it back. */
11944                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11945                 if (ret) {
11946                         printk(KERN_ERR "tg3_test_dma() read from the device failed, err = %d\n", ret);
11948                         break;
11949                 }
11950
11951                 /* Verify it. */
11952                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11953                         if (p[i] == i)
11954                                 continue;
11955
11956                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11957                             DMA_RWCTRL_WRITE_BNDRY_16) {
11958                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11959                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11960                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11961                                 break;
11962                         } else {
11963                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11964                                 ret = -ENODEV;
11965                                 goto out;
11966                         }
11967                 }
11968
11969                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11970                         /* Success. */
11971                         ret = 0;
11972                         break;
11973                 }
11974         }
11975         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11976             DMA_RWCTRL_WRITE_BNDRY_16) {
11977                 static struct pci_device_id dma_wait_state_chipsets[] = {
11978                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11979                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11980                         { },
11981                 };
11982
11983                 /* DMA test passed without adjusting DMA boundary,
11984                  * now look for chipsets that are known to expose the
11985                  * DMA bug without failing the test.
11986                  */
11987                 if (pci_dev_present(dma_wait_state_chipsets)) {
11988                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11989                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11990                 }
11991                 else
11992                         /* Safe to use the calculated DMA boundary. */
11993                         tp->dma_rwctrl = saved_dma_rwctrl;
11994
11995                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11996         }
11997
11998 out:
11999         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12000 out_nofree:
12001         return ret;
12002 }
12003
12004 static void __devinit tg3_init_link_config(struct tg3 *tp)
12005 {
12006         tp->link_config.advertising =
12007                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12008                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12009                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12010                  ADVERTISED_Autoneg | ADVERTISED_MII);
12011         tp->link_config.speed = SPEED_INVALID;
12012         tp->link_config.duplex = DUPLEX_INVALID;
12013         tp->link_config.autoneg = AUTONEG_ENABLE;
12014         tp->link_config.active_speed = SPEED_INVALID;
12015         tp->link_config.active_duplex = DUPLEX_INVALID;
12016         tp->link_config.phy_is_low_power = 0;
12017         tp->link_config.orig_speed = SPEED_INVALID;
12018         tp->link_config.orig_duplex = DUPLEX_INVALID;
12019         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12020 }
12021
12022 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12023 {
12024         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12025                 tp->bufmgr_config.mbuf_read_dma_low_water =
12026                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12027                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12028                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12029                 tp->bufmgr_config.mbuf_high_water =
12030                         DEFAULT_MB_HIGH_WATER_5705;
12031                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12032                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12033                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12034                         tp->bufmgr_config.mbuf_high_water =
12035                                 DEFAULT_MB_HIGH_WATER_5906;
12036                 }
12037
12038                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12039                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12040                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12041                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12042                 tp->bufmgr_config.mbuf_high_water_jumbo =
12043                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12044         } else {
12045                 tp->bufmgr_config.mbuf_read_dma_low_water =
12046                         DEFAULT_MB_RDMA_LOW_WATER;
12047                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12048                         DEFAULT_MB_MACRX_LOW_WATER;
12049                 tp->bufmgr_config.mbuf_high_water =
12050                         DEFAULT_MB_HIGH_WATER;
12051
12052                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12053                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12054                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12055                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12056                 tp->bufmgr_config.mbuf_high_water_jumbo =
12057                         DEFAULT_MB_HIGH_WATER_JUMBO;
12058         }
12059
12060         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12061         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12062 }
12063
12064 static char * __devinit tg3_phy_string(struct tg3 *tp)
12065 {
12066         switch (tp->phy_id & PHY_ID_MASK) {
12067         case PHY_ID_BCM5400:    return "5400";
12068         case PHY_ID_BCM5401:    return "5401";
12069         case PHY_ID_BCM5411:    return "5411";
12070         case PHY_ID_BCM5701:    return "5701";
12071         case PHY_ID_BCM5703:    return "5703";
12072         case PHY_ID_BCM5704:    return "5704";
12073         case PHY_ID_BCM5705:    return "5705";
12074         case PHY_ID_BCM5750:    return "5750";
12075         case PHY_ID_BCM5752:    return "5752";
12076         case PHY_ID_BCM5714:    return "5714";
12077         case PHY_ID_BCM5780:    return "5780";
12078         case PHY_ID_BCM5755:    return "5755";
12079         case PHY_ID_BCM5787:    return "5787";
12080         case PHY_ID_BCM5784:    return "5784";
12081         case PHY_ID_BCM5756:    return "5722/5756";
12082         case PHY_ID_BCM5906:    return "5906";
12083         case PHY_ID_BCM5761:    return "5761";
12084         case PHY_ID_BCM8002:    return "8002/serdes";
12085         case 0:                 return "serdes";
12086         default:                return "unknown";
12087         }
12088 }
12089
12090 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12091 {
12092         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12093                 strcpy(str, "PCI Express");
12094                 return str;
12095         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12096                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12097
12098                 strcpy(str, "PCIX:");
12099
12100                 if ((clock_ctrl == 7) ||
12101                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12102                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12103                         strcat(str, "133MHz");
12104                 else if (clock_ctrl == 0)
12105                         strcat(str, "33MHz");
12106                 else if (clock_ctrl == 2)
12107                         strcat(str, "50MHz");
12108                 else if (clock_ctrl == 4)
12109                         strcat(str, "66MHz");
12110                 else if (clock_ctrl == 6)
12111                         strcat(str, "100MHz");
12112         } else {
12113                 strcpy(str, "PCI:");
12114                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12115                         strcat(str, "66MHz");
12116                 else
12117                         strcat(str, "33MHz");
12118         }
12119         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12120                 strcat(str, ":32-bit");
12121         else
12122                 strcat(str, ":64-bit");
12123         return str;
12124 }
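/* Example (illustrative only): a 5704 running PCI-X at 133MHz on a 64-bit bus
 * would come back from tg3_bus_string() as "PCIX:133MHz:64-bit".
 */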
12125
12126 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12127 {
12128         struct pci_dev *peer;
12129         unsigned int func, devnr = tp->pdev->devfn & ~7;
12130
12131         for (func = 0; func < 8; func++) {
12132                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12133                 if (peer && peer != tp->pdev)
12134                         break;
12135                 pci_dev_put(peer);
12136         }
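        /* The dual-port 5704 shows up as two PCI functions on the same device,
         * so walk the other functions of our devfn group looking for the twin.
         */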
12137         /* 5704 can be configured in single-port mode, set peer to
12138          * tp->pdev in that case.
12139          */
12140         if (!peer) {
12141                 peer = tp->pdev;
12142                 return peer;
12143         }
12144
12145         /*
12146          * We don't need to keep the refcount elevated; there's no way
12147          * to remove one half of this device without removing the other.
12148          */
12149         pci_dev_put(peer);
12150
12151         return peer;
12152 }
12153
12154 static void __devinit tg3_init_coal(struct tg3 *tp)
12155 {
12156         struct ethtool_coalesce *ec = &tp->coal;
12157
12158         memset(ec, 0, sizeof(*ec));
12159         ec->cmd = ETHTOOL_GCOALESCE;
12160         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12161         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12162         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12163         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12164         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12165         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12166         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12167         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12168         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12169
12170         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12171                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12172                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12173                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12174                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12175                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12176         }
12177
12178         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12179                 ec->rx_coalesce_usecs_irq = 0;
12180                 ec->tx_coalesce_usecs_irq = 0;
12181                 ec->stats_block_coalesce_usecs = 0;
12182         }
12183 }
12184
12185 static int __devinit tg3_init_one(struct pci_dev *pdev,
12186                                   const struct pci_device_id *ent)
12187 {
12188         static int tg3_version_printed;
12189         unsigned long tg3reg_base, tg3reg_len;
12190         struct net_device *dev;
12191         struct tg3 *tp;
12192         int i, err, pm_cap;
12193         char str[40];
12194         u64 dma_mask, persist_dma_mask;
12195
12196         if (tg3_version_printed++ == 0)
12197                 printk(KERN_INFO "%s", version);
12198
12199         err = pci_enable_device(pdev);
12200         if (err) {
12201                 printk(KERN_ERR PFX "Cannot enable PCI device, "
12202                        "aborting.\n");
12203                 return err;
12204         }
12205
12206         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12207                 printk(KERN_ERR PFX "Cannot find proper PCI device "
12208                        "base address, aborting.\n");
12209                 err = -ENODEV;
12210                 goto err_out_disable_pdev;
12211         }
12212
12213         err = pci_request_regions(pdev, DRV_MODULE_NAME);
12214         if (err) {
12215                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12216                        "aborting.\n");
12217                 goto err_out_disable_pdev;
12218         }
12219
12220         pci_set_master(pdev);
12221
12222         /* Find power-management capability. */
12223         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12224         if (pm_cap == 0) {
12225                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12226                        "aborting.\n");
12227                 err = -EIO;
12228                 goto err_out_free_res;
12229         }
12230
12231         tg3reg_base = pci_resource_start(pdev, 0);
12232         tg3reg_len = pci_resource_len(pdev, 0);
12233
12234         dev = alloc_etherdev(sizeof(*tp));
12235         if (!dev) {
12236                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12237                 err = -ENOMEM;
12238                 goto err_out_free_res;
12239         }
12240
12241         SET_NETDEV_DEV(dev, &pdev->dev);
12242
12243 #if TG3_VLAN_TAG_USED
12244         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12245         dev->vlan_rx_register = tg3_vlan_rx_register;
12246 #endif
12247
12248         tp = netdev_priv(dev);
12249         tp->pdev = pdev;
12250         tp->dev = dev;
12251         tp->pm_cap = pm_cap;
12252         tp->mac_mode = TG3_DEF_MAC_MODE;
12253         tp->rx_mode = TG3_DEF_RX_MODE;
12254         tp->tx_mode = TG3_DEF_TX_MODE;
12255         tp->mi_mode = MAC_MI_MODE_BASE;
12256         if (tg3_debug > 0)
12257                 tp->msg_enable = tg3_debug;
12258         else
12259                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12260
12261         /* The word/byte swap controls here control register access byte
12262          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
12263          * setting below.
12264          */
12265         tp->misc_host_ctrl =
12266                 MISC_HOST_CTRL_MASK_PCI_INT |
12267                 MISC_HOST_CTRL_WORD_SWAP |
12268                 MISC_HOST_CTRL_INDIR_ACCESS |
12269                 MISC_HOST_CTRL_PCISTATE_RW;
12270
12271         /* The NONFRM (non-frame) byte/word swap controls take effect
12272          * on descriptor entries, anything which isn't packet data.
12273          *
12274          * The StrongARM chips on the board (one for tx, one for rx)
12275          * are running in big-endian mode.
12276          */
12277         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12278                         GRC_MODE_WSWAP_NONFRM_DATA);
12279 #ifdef __BIG_ENDIAN
12280         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12281 #endif
12282         spin_lock_init(&tp->lock);
12283         spin_lock_init(&tp->indirect_lock);
12284         INIT_WORK(&tp->reset_task, tg3_reset_task);
12285
12286         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12287         if (!tp->regs) {
12288                 printk(KERN_ERR PFX "Cannot map device registers, "
12289                        "aborting.\n");
12290                 err = -ENOMEM;
12291                 goto err_out_free_dev;
12292         }
12293
12294         tg3_init_link_config(tp);
12295
12296         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12297         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12298         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12299
12300         dev->open = tg3_open;
12301         dev->stop = tg3_close;
12302         dev->get_stats = tg3_get_stats;
12303         dev->set_multicast_list = tg3_set_rx_mode;
12304         dev->set_mac_address = tg3_set_mac_addr;
12305         dev->do_ioctl = tg3_ioctl;
12306         dev->tx_timeout = tg3_tx_timeout;
12307         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12308         dev->ethtool_ops = &tg3_ethtool_ops;
12309         dev->watchdog_timeo = TG3_TX_TIMEOUT;
12310         dev->change_mtu = tg3_change_mtu;
12311         dev->irq = pdev->irq;
12312 #ifdef CONFIG_NET_POLL_CONTROLLER
12313         dev->poll_controller = tg3_poll_controller;
12314 #endif
12315
12316         err = tg3_get_invariants(tp);
12317         if (err) {
12318                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12319                        "aborting.\n");
12320                 goto err_out_iounmap;
12321         }
12322
12323         /* The EPB bridge inside 5714, 5715, and 5780 and any
12324          * device behind the EPB cannot support DMA addresses > 40-bit.
12325          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12326          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12327          * do DMA address check in tg3_start_xmit().
12328          */
12329         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12330                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12331         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12332                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12333 #ifdef CONFIG_HIGHMEM
12334                 dma_mask = DMA_64BIT_MASK;
12335 #endif
12336         } else
12337                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12338
12339         /* Configure DMA attributes. */
12340         if (dma_mask > DMA_32BIT_MASK) {
12341                 err = pci_set_dma_mask(pdev, dma_mask);
12342                 if (!err) {
12343                         dev->features |= NETIF_F_HIGHDMA;
12344                         err = pci_set_consistent_dma_mask(pdev,
12345                                                           persist_dma_mask);
12346                         if (err < 0) {
12347                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12348                                        "DMA for consistent allocations\n");
12349                                 goto err_out_iounmap;
12350                         }
12351                 }
12352         }
12353         if (err || dma_mask == DMA_32BIT_MASK) {
12354                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12355                 if (err) {
12356                         printk(KERN_ERR PFX "No usable DMA configuration, "
12357                                "aborting.\n");
12358                         goto err_out_iounmap;
12359                 }
12360         }
12361
12362         tg3_init_bufmgr_config(tp);
12363
12364         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12365                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12366         }
12367         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12368             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12369             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12370             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12371             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12372                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12373         } else {
12374                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12375         }
12376
12377         /* TSO is on by default on chips that support hardware TSO.
12378          * Firmware TSO on older chips gives lower performance, so it
12379          * is off by default, but can be enabled using ethtool.
12380          */
12381         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12382                 dev->features |= NETIF_F_TSO;
12383                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12384                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12385                         dev->features |= NETIF_F_TSO6;
12386                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12387                         dev->features |= NETIF_F_TSO_ECN;
12388         }
12389
12391         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12392             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12393             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12394                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12395                 tp->rx_pending = 63;
12396         }
12397
12398         err = tg3_get_device_address(tp);
12399         if (err) {
12400                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12401                        "aborting.\n");
12402                 goto err_out_iounmap;
12403         }
12404
12405         /*
12406          * Reset chip in case UNDI or EFI driver did not shut down.
12407          * The DMA self test will enable WDMAC and we'll see (spurious)
12408          * pending DMA on the PCI bus at that point.
12409          */
12410         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12411             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12412                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12413                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12414         }
12415
12416         err = tg3_test_dma(tp);
12417         if (err) {
12418                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12419                 goto err_out_iounmap;
12420         }
12421
12422         /* Tigon3 can offload checksums for IPv4 only (IPv6 on a few newer
12423          * chips, handled just below), and some chips have buggy checksumming.
12424          */
12425         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12426                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12427                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12428                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12429                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12430                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12431                         dev->features |= NETIF_F_IPV6_CSUM;
12432
12433                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12434         } else
12435                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12436
12437         /* flow control autonegotiation is default behavior */
12438         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12439
12440         tg3_init_coal(tp);
12441
12442         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12443                 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12444                         printk(KERN_ERR PFX "Cannot find proper PCI device "
12445                                "base address for APE, aborting.\n");
12446                         err = -ENODEV;
12447                         goto err_out_iounmap;
12448                 }
12449
12450                 tg3reg_base = pci_resource_start(pdev, 2);
12451                 tg3reg_len = pci_resource_len(pdev, 2);
12452
12453                 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12454                 if (!tp->aperegs) {
12455                         printk(KERN_ERR PFX "Cannot map APE registers, "
12456                                "aborting.\n");
12457                         err = -ENOMEM;
12458                         goto err_out_iounmap;
12459                 }
12460
12461                 tg3_ape_lock_init(tp);
12462         }
12463
12464         pci_set_drvdata(pdev, dev);
12465
12466         err = register_netdev(dev);
12467         if (err) {
12468                 printk(KERN_ERR PFX "Cannot register net device, "
12469                        "aborting.\n");
12470                 goto err_out_apeunmap;
12471         }
12472
12473         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
12474                dev->name,
12475                tp->board_part_number,
12476                tp->pci_chip_rev_id,
12477                tg3_phy_string(tp),
12478                tg3_bus_string(tp, str),
12479                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12480                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12481                  "10/100/1000Base-T")));
12482
12483         for (i = 0; i < 6; i++)
12484                 printk("%2.2x%c", dev->dev_addr[i],
12485                        i == 5 ? '\n' : ':');
12486
12487         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12488                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12489                dev->name,
12490                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12491                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12492                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12493                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12494                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12495                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12496         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12497                dev->name, tp->dma_rwctrl,
12498                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12499                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12500
12501         return 0;
12502
12503 err_out_apeunmap:
12504         if (tp->aperegs) {
12505                 iounmap(tp->aperegs);
12506                 tp->aperegs = NULL;
12507         }
12508
12509 err_out_iounmap:
12510         if (tp->regs) {
12511                 iounmap(tp->regs);
12512                 tp->regs = NULL;
12513         }
12514
12515 err_out_free_dev:
12516         free_netdev(dev);
12517
12518 err_out_free_res:
12519         pci_release_regions(pdev);
12520
12521 err_out_disable_pdev:
12522         pci_disable_device(pdev);
12523         pci_set_drvdata(pdev, NULL);
12524         return err;
12525 }
12526
12527 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12528 {
12529         struct net_device *dev = pci_get_drvdata(pdev);
12530
12531         if (dev) {
12532                 struct tg3 *tp = netdev_priv(dev);
12533
12534                 flush_scheduled_work();
12535                 unregister_netdev(dev);
12536                 if (tp->aperegs) {
12537                         iounmap(tp->aperegs);
12538                         tp->aperegs = NULL;
12539                 }
12540                 if (tp->regs) {
12541                         iounmap(tp->regs);
12542                         tp->regs = NULL;
12543                 }
12544                 free_netdev(dev);
12545                 pci_release_regions(pdev);
12546                 pci_disable_device(pdev);
12547                 pci_set_drvdata(pdev, NULL);
12548         }
12549 }
12550
12551 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
12552 {
12553         struct net_device *dev = pci_get_drvdata(pdev);
12554         struct tg3 *tp = netdev_priv(dev);
12555         int err;
12556
12557         /* PCI register 4 needs to be saved whether netif_running() or not.
12558          * MSI address and data need to be saved if using MSI and
12559          * netif_running().
12560          */
12561         pci_save_state(pdev);
12562
12563         if (!netif_running(dev))
12564                 return 0;
12565
12566         flush_scheduled_work();
12567         tg3_netif_stop(tp);
12568
12569         del_timer_sync(&tp->timer);
12570
12571         tg3_full_lock(tp, 1);
12572         tg3_disable_ints(tp);
12573         tg3_full_unlock(tp);
12574
12575         netif_device_detach(dev);
12576
12577         tg3_full_lock(tp, 0);
12578         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12579         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
12580         tg3_full_unlock(tp);
12581
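        /* If entering the requested low-power state fails, undo the shutdown:
         * restart the hardware, re-arm the timer and reattach the interface so
         * the device stays usable.
         */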
12582         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
12583         if (err) {
12584                 tg3_full_lock(tp, 0);
12585
12586                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12587                 if (tg3_restart_hw(tp, 1))
12588                         goto out;
12589
12590                 tp->timer.expires = jiffies + tp->timer_offset;
12591                 add_timer(&tp->timer);
12592
12593                 netif_device_attach(dev);
12594                 tg3_netif_start(tp);
12595
12596 out:
12597                 tg3_full_unlock(tp);
12598         }
12599
12600         return err;
12601 }
12602
12603 static int tg3_resume(struct pci_dev *pdev)
12604 {
12605         struct net_device *dev = pci_get_drvdata(pdev);
12606         struct tg3 *tp = netdev_priv(dev);
12607         int err;
12608
12609         pci_restore_state(tp->pdev);
12610
12611         if (!netif_running(dev))
12612                 return 0;
12613
12614         err = tg3_set_power_state(tp, PCI_D0);
12615         if (err)
12616                 return err;
12617
12618         /* Hardware bug - MSI won't work if INTX disabled. */
12619         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
12620             (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
12621                 pci_intx(tp->pdev, 1);
12622
12623         netif_device_attach(dev);
12624
12625         tg3_full_lock(tp, 0);
12626
12627         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12628         err = tg3_restart_hw(tp, 1);
12629         if (err)
12630                 goto out;
12631
12632         tp->timer.expires = jiffies + tp->timer_offset;
12633         add_timer(&tp->timer);
12634
12635         tg3_netif_start(tp);
12636
12637 out:
12638         tg3_full_unlock(tp);
12639
12640         return err;
12641 }
12642
12643 static struct pci_driver tg3_driver = {
12644         .name           = DRV_MODULE_NAME,
12645         .id_table       = tg3_pci_tbl,
12646         .probe          = tg3_init_one,
12647         .remove         = __devexit_p(tg3_remove_one),
12648         .suspend        = tg3_suspend,
12649         .resume         = tg3_resume
12650 };
12651
12652 static int __init tg3_init(void)
12653 {
12654         return pci_register_driver(&tg3_driver);
12655 }
12656
12657 static void __exit tg3_cleanup(void)
12658 {
12659         pci_unregister_driver(&tg3_driver);
12660 }
12661
12662 module_init(tg3_init);
12663 module_exit(tg3_cleanup);