]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/net/tg3.c
phylib: move to dynamic allocation of struct mii_bus
[net-next-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
58 #define TG3_VLAN_TAG_USED 1
59 #else
60 #define TG3_VLAN_TAG_USED 0
61 #endif
62
63 #define TG3_TSO_SUPPORT 1
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.94"
70 #define DRV_MODULE_RELDATE      "August 14, 2008"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself,
105  * we really want to expose these constants to GCC so that modulo et
106  * al.  operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
125
126 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
127 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
128
129 /* minimum number of free TX descriptors required to wake up TX process */
130 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
131
132 /* number of ETHTOOL_GSTATS u64's */
133 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
134
135 #define TG3_NUM_TEST            6
136
137 static char version[] __devinitdata =
138         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
139
140 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
141 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
142 MODULE_LICENSE("GPL");
143 MODULE_VERSION(DRV_MODULE_VERSION);
144
145 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
146 module_param(tg3_debug, int, 0);
147 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
148
/* PCI vendor/device IDs this driver binds to.  The table is exported to
 * userspace/modutils via MODULE_DEVICE_TABLE below and must end with an
 * all-zero sentinel entry.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}	/* sentinel */
};
218
219 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
220
/* Names reported for ETHTOOL_GSTRINGS/ETHTOOL_GSTATS.  The order of the
 * entries must match the u64 layout of struct tg3_ethtool_stats
 * (TG3_NUM_STATS is derived from that struct's size).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
301
/* Self-test names for ETHTOOL_TEST; order must match the result slots
 * filled in by the driver's self-test routine (TG3_NUM_TEST entries).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
312
313 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
314 {
315         writel(val, tp->regs + off);
316 }
317
318 static u32 tg3_read32(struct tg3 *tp, u32 off)
319 {
320         return (readl(tp->regs + off));
321 }
322
323 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
324 {
325         writel(val, tp->aperegs + off);
326 }
327
328 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
329 {
330         return (readl(tp->aperegs + off));
331 }
332
/* Write a device register indirectly through PCI config space instead of
 * MMIO.  The BASE_ADDR/DATA register pair is a single shared window, so
 * the two config writes must be atomic with respect to other users —
 * hence indirect_lock with interrupts disabled.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
342
/* MMIO register write followed by a read-back of the same register.
 * The read forces the posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);	/* flush the posted write */
}
348
/* Read a device register indirectly through the PCI config-space
 * BASE_ADDR/DATA window; see tg3_write_indirect_reg32 for why the
 * spinlock is required.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
360
/* Write a mailbox register via PCI config space (indirect mode).
 *
 * Two mailboxes have dedicated shadow registers in config space and are
 * handled specially; all others go through the shared BASE_ADDR/DATA
 * window with a fixed offset applied (0x5600 — presumably the mailbox
 * region's base within the indirect register map; confirm against the
 * chip documentation).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return-ring consumer index: dedicated config-space register */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Standard RX ring producer index: dedicated config-space register */
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
390
/* Read a mailbox register via PCI config space; the 0x5600 offset mirrors
 * the one used by tg3_write_indirect_mbox to reach the mailbox region
 * through the shared BASE_ADDR/DATA window.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
402
403 /* usec_wait specifies the wait time in usec when writing to certain registers
404  * where it is unsafe to read back the register without some delay.
405  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
406  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
407  */
/* Register write with an optional settle delay (see the comment block
 * above for which registers need @usec_wait).  Chips with the PCI-X
 * target or ICH workaround flags must use the non-posted write path
 * (tp->write32 routes through config space there); everyone else does a
 * posted MMIO write flushed by a read-back.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);	/* flush the posted write */
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
427
/* Mailbox write followed by a read-back flush, except on chips where the
 * mailbox read would be reordered (MBOX_WRITE_REORDER) or where the ICH
 * workaround forbids it.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
435
/* TX mailbox write.  Chips with the TXD mailbox hardware bug need the
 * value written twice; chips that may reorder mailbox writes get a
 * read-back to force ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);	/* HW bug workaround: write twice */
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);		/* flush to enforce ordering */
}
445
446 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
447 {
448         return (readl(tp->regs + off + GRCMBOX_BASE));
449 }
450
451 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
452 {
453         writel(val, tp->regs + off + GRCMBOX_BASE);
454 }
455
456 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
457 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
458 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
459 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
460 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
461
462 #define tw32(reg,val)           tp->write32(tp, reg, val)
463 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
464 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
465 #define tr32(reg)               tp->read32(tp, reg)
466
/* Write a word into NIC on-board SRAM through the memory window.
 * On 5906 the stats-block SRAM range is not written (silently skipped).
 * The window base/data pair is shared, so the whole sequence runs under
 * indirect_lock; SRAM_USE_CONFIG chips access the window via PCI config
 * space instead of MMIO.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
491
/* Read a word from NIC on-board SRAM through the memory window.
 * On 5906 the stats-block SRAM range is not accessed; *val is reported
 * as 0 for that range.  Locking/config-space handling mirrors
 * tg3_write_mem.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
518
519 static void tg3_ape_lock_init(struct tg3 *tp)
520 {
521         int i;
522
523         /* Make sure the driver hasn't any stale locks. */
524         for (i = 0; i < 8; i++)
525                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
526                                 APE_LOCK_GRANT_DRIVER);
527 }
528
529 static int tg3_ape_lock(struct tg3 *tp, int locknum)
530 {
531         int i, off;
532         int ret = 0;
533         u32 status;
534
535         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
536                 return 0;
537
538         switch (locknum) {
539                 case TG3_APE_LOCK_GRC:
540                 case TG3_APE_LOCK_MEM:
541                         break;
542                 default:
543                         return -EINVAL;
544         }
545
546         off = 4 * locknum;
547
548         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
549
550         /* Wait for up to 1 millisecond to acquire lock. */
551         for (i = 0; i < 100; i++) {
552                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
553                 if (status == APE_LOCK_GRANT_DRIVER)
554                         break;
555                 udelay(10);
556         }
557
558         if (status != APE_LOCK_GRANT_DRIVER) {
559                 /* Revoke the lock request. */
560                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
561                                 APE_LOCK_GRANT_DRIVER);
562
563                 ret = -EBUSY;
564         }
565
566         return ret;
567 }
568
569 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
570 {
571         int off;
572
573         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
574                 return;
575
576         switch (locknum) {
577                 case TG3_APE_LOCK_GRC:
578                 case TG3_APE_LOCK_MEM:
579                         break;
580                 default:
581                         return;
582         }
583
584         off = 4 * locknum;
585         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
586 }
587
/* Mask interrupts: set the PCI-int mask bit in MISC_HOST_CTRL and write 1
 * to the interrupt mailbox (which also triggers the indirect-mode GRC
 * clear-int workaround in tg3_write_indirect_mbox).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
594
/* Force an interrupt if status work is already pending, so it isn't lost
 * across an enable.  Non-tagged chips with a pending status update get a
 * GRC SETINT kick; otherwise a coalescing "now" event is requested.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
604
/* Re-enable interrupts after tg3_disable_ints()/irq_sync quiescing. */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();		/* irq_sync must be visible before unmasking */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	/* Ack up to last_tag so the chip knows which work we completed. */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	/* 1-shot MSI chips get the mailbox written a second time —
	 * presumably required to re-arm the one-shot MSI; confirm against
	 * chip errata.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
619
620 static inline unsigned int tg3_has_work(struct tg3 *tp)
621 {
622         struct tg3_hw_status *sblk = tp->hw_status;
623         unsigned int work_exists = 0;
624
625         /* check for phy events */
626         if (!(tp->tg3_flags &
627               (TG3_FLAG_USE_LINKCHG_REG |
628                TG3_FLAG_POLL_SERDES))) {
629                 if (sblk->status & SD_STATUS_LINK_CHG)
630                         work_exists = 1;
631         }
632         /* check for RX/TX work to do */
633         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
634             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
635                 work_exists = 1;
636
637         return work_exists;
638 }
639
640 /* tg3_restart_ints
641  *  similar to tg3_enable_ints, but it accurately determines whether there
642  *  is new work pending and can return without flushing the PIO write
643  *  which reenables interrupts
644  */
/* Re-arm interrupts from the poll path (see comment block above: unlike
 * tg3_enable_ints this avoids flushing the mailbox PIO write).
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* order the mailbox write before any later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
660
/* Quiesce the data path: refresh trans_start first so the watchdog does
 * not fire while the queue is deliberately stopped, then stop NAPI and
 * the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
667
/* Restart the data path after tg3_netif_stop(); forces a status update
 * so any events that arrived while stopped are processed once interrupts
 * are back on.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
679
/* Step the core clock down to its final rate.  Not applicable to CPMU
 * chips or the 5780 class.  Each write uses tw32_wait_f with a 40us
 * settle time because the clock registers cannot be safely read back
 * immediately (see the _tw32_flush comment block).  The 44MHz-core path
 * deliberately clears CLOCK_CTRL_44MHZ_CORE in two steps via ALTCLK.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only CLKRUN control and the low divider bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
711
712 #define PHY_BUSY_LOOPS  5000
713
/* Read PHY register @reg over the MAC's MII shuttle.
 *
 * Auto-polling is temporarily disabled so the shuttle is free for our
 * transaction, and restored afterwards.  Returns 0 with *val filled in,
 * or -EBUSY if the shuttle stays busy for the whole PHY_BUSY_LOOPS
 * polling window (*val is left as 0 in that case).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);	/* let the shuttle settle */
	}

	*val = 0x0;

	/* Compose the MII frame: PHY address, register, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the shuttle clears the busy bit, then re-read to
	 * pick up the data bits.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
762
/* Write @val to PHY register @reg over the MAC's MII shuttle.
 *
 * On 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently
 * dropped (returns 0).  Auto-polling is paused around the transaction as
 * in tg3_readphy.  Returns 0 on completion or -EBUSY on shuttle timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);	/* let the shuttle settle */
	}

	/* Compose the MII frame: PHY address, register, data, write cmd. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the shuttle clears the busy bit. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
811
812 static int tg3_bmcr_reset(struct tg3 *tp)
813 {
814         u32 phy_control;
815         int limit, err;
816
817         /* OK, reset it, and poll the BMCR_RESET bit until it
818          * clears or we time out.
819          */
820         phy_control = BMCR_RESET;
821         err = tg3_writephy(tp, MII_BMCR, phy_control);
822         if (err != 0)
823                 return -EBUSY;
824
825         limit = 5000;
826         while (limit--) {
827                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
828                 if (err != 0)
829                         return -EBUSY;
830
831                 if ((phy_control & BMCR_RESET) == 0) {
832                         udelay(40);
833                         break;
834                 }
835                 udelay(10);
836         }
837         if (limit <= 0)
838                 return -EBUSY;
839
840         return 0;
841 }
842
843 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
844 {
845         struct tg3 *tp = (struct tg3 *)bp->priv;
846         u32 val;
847
848         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
849                 return -EAGAIN;
850
851         if (tg3_readphy(tp, reg, &val))
852                 return -EIO;
853
854         return val;
855 }
856
857 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
858 {
859         struct tg3 *tp = (struct tg3 *)bp->priv;
860
861         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
862                 return -EAGAIN;
863
864         if (tg3_writephy(tp, reg, val))
865                 return -EIO;
866
867         return 0;
868 }
869
/* phylib reset callback: the tg3 MDIO bus needs no reset sequence. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}
874
/* Program the MAC-side RGMII configuration registers (PHYCFG1/2 and
 * EXT_RGMII_MODE) to match the attached PHY's in-band signaling setup.
 * No-op unless the PHY interface mode is RGMII.
 */
static void tg3_mdio_config(struct tg3 *tp)
{
        u32 val;

        if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
            PHY_INTERFACE_MODE_RGMII)
                return;

        /* PHYCFG1: external RX decode / send-status bits apply only
         * when standard in-band signaling is disabled and the matching
         * external in-band RX/TX flags are set.
         */
        val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
                                    MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

        /* PHYCFG2: in-band status only when standard in-band signaling
         * is in use.
         */
        val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
                val |= MAC_PHYCFG2_INBAND_ENABLE;
        tw32(MAC_PHYCFG2, val);

        /* EXT_RGMII_MODE: clear every RX/TX mode bit, then re-enable
         * the RX group and/or TX group per the external in-band flags.
         */
        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}
919
/* Resume MDIO access after a chip reset: clear the PAUSED flag (under
 * the bus's mdio_lock, if the bus is registered), disable hardware PHY
 * auto-polling so the driver owns the MI interface, and reapply the
 * RGMII MAC configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                mutex_lock(&tp->mdio_bus->mdio_lock);
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
                mutex_unlock(&tp->mdio_bus->mdio_lock);
        }

        /* Driver-driven MDIO: turn off hardware auto-polling. */
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
                tg3_mdio_config(tp);
}
935
936 static void tg3_mdio_stop(struct tg3 *tp)
937 {
938         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
939                 mutex_lock(&tp->mdio_bus->mdio_lock);
940                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
941                 mutex_unlock(&tp->mdio_bus->mdio_lock);
942         }
943 }
944
945 static int tg3_mdio_init(struct tg3 *tp)
946 {
947         int i;
948         u32 reg;
949         struct phy_device *phydev;
950
951         tg3_mdio_start(tp);
952
953         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
954             (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
955                 return 0;
956
957         tp->mdio_bus = mdiobus_alloc();
958         if (tp->mdio_bus == NULL)
959                 return -ENOMEM;
960
961         tp->mdio_bus->name     = "tg3 mdio bus";
962         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
963                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
964         tp->mdio_bus->priv     = tp;
965         tp->mdio_bus->parent   = &tp->pdev->dev;
966         tp->mdio_bus->read     = &tg3_mdio_read;
967         tp->mdio_bus->write    = &tg3_mdio_write;
968         tp->mdio_bus->reset    = &tg3_mdio_reset;
969         tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
970         tp->mdio_bus->irq      = &tp->mdio_irq[0];
971
972         for (i = 0; i < PHY_MAX_ADDR; i++)
973                 tp->mdio_bus->irq[i] = PHY_POLL;
974
975         /* The bus registration will look for all the PHYs on the mdio bus.
976          * Unfortunately, it does not ensure the PHY is powered up before
977          * accessing the PHY ID registers.  A chip reset is the
978          * quickest way to bring the device back to an operational state..
979          */
980         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
981                 tg3_bmcr_reset(tp);
982
983         i = mdiobus_register(tp->mdio_bus);
984         if (i) {
985                 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
986                         tp->dev->name, i);
987                 return i;
988         }
989
990         tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
991
992         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
993
994         switch (phydev->phy_id) {
995         case TG3_PHY_ID_BCM50610:
996                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
997                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
998                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
999                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1000                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1001                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1002                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1003                 break;
1004         case TG3_PHY_ID_BCMAC131:
1005                 phydev->interface = PHY_INTERFACE_MODE_MII;
1006                 break;
1007         }
1008
1009         tg3_mdio_config(tp);
1010
1011         return 0;
1012 }
1013
1014 static void tg3_mdio_fini(struct tg3 *tp)
1015 {
1016         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1017                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1018                 mdiobus_unregister(tp->mdio_bus);
1019                 mdiobus_free(tp->mdio_bus);
1020                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1021         }
1022 }
1023
/* tp->lock is held. */
/* Signal the firmware by setting the driver-event bit in
 * GRC_RX_CPU_EVENT, and record the time so tg3_wait_for_event_ack()
 * can bound how long it waits for the acknowledgement.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}
1035
1036 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1037
/* tp->lock is held. */
/* Busy-wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to
 * acknowledge the previous driver event by clearing
 * GRC_RX_CPU_DRIVER_EVENT.  The wait is shortened by however much time
 * has already elapsed since the event was generated.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        /* Poll in ~8us steps. */
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
1064
/* tp->lock is held. */
/* Report the current MII link registers to the management firmware
 * (5780-class parts with ASF only): a 14-byte payload of BMCR/BMSR,
 * ADVERTISE/LPA, CTRL1000/STAT1000 and PHYADDR pairs is written to the
 * firmware command mailbox, then a driver event is raised.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
            !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
                return;

        /* Wait for the previous event to be acknowledged first. */
        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        /* Word 0: BMCR in the high half, BMSR in the low half.  A
         * failed read leaves that half as zero.
         */
        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        /* Word 1: local and link-partner autoneg advertisements. */
        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        /* Word 2: 1000BASE-T control/status (copper only). */
        val = 0;
        if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        /* Word 3: PHY address register in the high half. */
        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}
1112
1113 static void tg3_link_report(struct tg3 *tp)
1114 {
1115         if (!netif_carrier_ok(tp->dev)) {
1116                 if (netif_msg_link(tp))
1117                         printk(KERN_INFO PFX "%s: Link is down.\n",
1118                                tp->dev->name);
1119                 tg3_ump_link_report(tp);
1120         } else if (netif_msg_link(tp)) {
1121                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1122                        tp->dev->name,
1123                        (tp->link_config.active_speed == SPEED_1000 ?
1124                         1000 :
1125                         (tp->link_config.active_speed == SPEED_100 ?
1126                          100 : 10)),
1127                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1128                         "full" : "half"));
1129
1130                 printk(KERN_INFO PFX
1131                        "%s: Flow control is %s for TX and %s for RX.\n",
1132                        tp->dev->name,
1133                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1134                        "on" : "off",
1135                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1136                        "on" : "off");
1137                 tg3_ump_link_report(tp);
1138         }
1139 }
1140
1141 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1142 {
1143         u16 miireg;
1144
1145         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1146                 miireg = ADVERTISE_PAUSE_CAP;
1147         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1148                 miireg = ADVERTISE_PAUSE_ASYM;
1149         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1150                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1151         else
1152                 miireg = 0;
1153
1154         return miireg;
1155 }
1156
1157 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1158 {
1159         u16 miireg;
1160
1161         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1162                 miireg = ADVERTISE_1000XPAUSE;
1163         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1164                 miireg = ADVERTISE_1000XPSE_ASYM;
1165         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1166                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1167         else
1168                 miireg = 0;
1169
1170         return miireg;
1171 }
1172
1173 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1174 {
1175         u8 cap = 0;
1176
1177         if (lcladv & ADVERTISE_PAUSE_CAP) {
1178                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1179                         if (rmtadv & LPA_PAUSE_CAP)
1180                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1181                         else if (rmtadv & LPA_PAUSE_ASYM)
1182                                 cap = TG3_FLOW_CTRL_RX;
1183                 } else {
1184                         if (rmtadv & LPA_PAUSE_CAP)
1185                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1186                 }
1187         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1188                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1189                         cap = TG3_FLOW_CTRL_TX;
1190         }
1191
1192         return cap;
1193 }
1194
1195 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1196 {
1197         u8 cap = 0;
1198
1199         if (lcladv & ADVERTISE_1000XPAUSE) {
1200                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1201                         if (rmtadv & LPA_1000XPAUSE)
1202                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1203                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1204                                 cap = TG3_FLOW_CTRL_RX;
1205                 } else {
1206                         if (rmtadv & LPA_1000XPAUSE)
1207                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1208                 }
1209         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1210                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1211                         cap = TG3_FLOW_CTRL_TX;
1212         }
1213
1214         return cap;
1215 }
1216
/* Resolve the active TX/RX flow-control setting from the local and
 * remote advertisements (@lcladv, @rmtadv) -- or from the configured
 * value when autoneg is off or pause-autoneg is disabled -- and apply
 * it to the MAC's RX_MODE/TX_MODE registers (writing only on change).
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        /* With phylib, the autoneg state lives on the phy_device. */
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
                autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE &&
            (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
                /* Serdes links use 1000X pause bits, copper uses 1000T. */
                if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & TG3_FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & TG3_FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1256
/* phylib link-change callback: mirror the PHY's negotiated state into
 * the MAC (port mode, duplex, flow control, TX slot timings), record
 * it in tp->link_config, and log a link message when anything changed.
 * Runs under tp->lock; tg3_link_report() is called after dropping it.
 */
static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        spin_lock(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                /* 10/100 runs the MAC in MII mode, 1000 in GMII mode. */
                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        /* Full duplex: resolve pause from our configured
                         * flow control and the partner's pause bits.
                         */
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        /* Only touch the MAC_MODE register when something changed. */
        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        /* 1000/half needs a longer slot time (0xff vs. 32). */
        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        /* Log when the link toggled or speed/duplex/flowctrl changed. */
        if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
            (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
            linkmesg = 1;

        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock(&tp->lock);

        if (linkmesg)
                tg3_link_report(tp);
}
1328
/* Connect the net_device to its phylib PHY: reset the PHY to a known
 * state, attach via phy_connect() with tg3_adjust_link as the
 * link-change handler, and mask the advertised features down to what
 * the MAC supports (gigabit + pause).
 *
 * Returns 0 on success (or if already connected), or the
 * phy_connect() error code.
 */
static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
                             phydev->dev_flags, phydev->interface);
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
                return PTR_ERR(phydev);
        }

        tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

        /* Mask with MAC supported features. */
        phydev->supported &= (PHY_GBIT_FEATURES |
                              SUPPORTED_Pause |
                              SUPPORTED_Asym_Pause);

        phydev->advertising = phydev->supported;

        printk(KERN_INFO
               "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
               tp->dev->name, phydev->drv->name, phydev->dev.bus_id);

        return 0;
}
1364
/* Start (or restart) the connected PHY.  If the link was previously
 * forced into low-power mode, restore the saved speed/duplex/autoneg
 * settings first, then kick off autonegotiation.
 * No-op if no PHY is connected.
 */
static void tg3_phy_start(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                return;

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        if (tp->link_config.phy_is_low_power) {
                /* Leaving low-power mode: restore the pre-suspend
                 * link configuration.
                 */
                tp->link_config.phy_is_low_power = 0;
                phydev->speed = tp->link_config.orig_speed;
                phydev->duplex = tp->link_config.orig_duplex;
                phydev->autoneg = tp->link_config.orig_autoneg;
                phydev->advertising = tp->link_config.orig_advertising;
        }

        phy_start(phydev);

        phy_start_aneg(phydev);
}
1386
1387 static void tg3_phy_stop(struct tg3 *tp)
1388 {
1389         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1390                 return;
1391
1392         phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1393 }
1394
1395 static void tg3_phy_fini(struct tg3 *tp)
1396 {
1397         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1398                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1399                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1400         }
1401 }
1402
/* Write @val to PHY DSP register @reg: the DSP address register must
 * be loaded first, then the data goes through the read/write port.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1408
/* Enable or disable automatic MDI crossover (auto-MDIX) on the PHY.
 * Only applies to 5705-plus copper parts; serdes links are skipped.
 * 5906 parts use the EPHY shadow-register path, everything else the
 * AUX_CTRL misc shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                /* 5906: expose the shadow registers, flip the MDIX
                 * bit in MISCCTRL, then restore the test register.
                 */
                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                /* Other parts: read-modify-write the AUX_CTRL misc
                 * shadow register (WREN must be set to commit).
                 */
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}
1446
/* Enable the PHY's ethernet-at-wirespeed feature by setting bits 15
 * and 4 in the AUX_CTRL shadow register selected by 0x7007, unless the
 * chip is flagged as not supporting it.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        /* Select the shadow register, read-modify-write it. */
        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}
1459
/* Apply per-board analog tuning values from the chip's OTP word
 * (tp->phy_otp) to the PHY DSP registers.  Each field is extracted
 * from the packed OTP word and written to its DSP register while the
 * SM_DSP clock is enabled; the clock is turned back off at the end.
 * No-op when no OTP value was read at probe time.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        /* Enable SM_DSP clock and tx 6dB coding. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

        /* AGC target. */
        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        /* High-pass filter settings. */
        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        /* Low-pass filter disable + ADC clock adjust. */
        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        /* VDAC trim. */
        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        /* 10BASE-T amplitude. */
        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        /* Resistor offsets. */
        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        /* Turn off SM_DSP clock. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1502
1503 static int tg3_wait_macro_done(struct tg3 *tp)
1504 {
1505         int limit = 100;
1506
1507         while (limit--) {
1508                 u32 tmp32;
1509
1510                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1511                         if ((tmp32 & 0x1000) == 0)
1512                                 break;
1513                 }
1514         }
1515         if (limit <= 0)
1516                 return -EBUSY;
1517
1518         return 0;
1519 }
1520
/* Write a fixed DSP test pattern into all four PHY channels and read
 * it back for verification.
 *
 * For each channel: load the six test words through the DSP r/w port,
 * run the write macro (0x0202), rewind (0x0082), start the read macro
 * (0x0802), and compare the six words read back (low word masked to
 * 15 bits, high word to 4 bits) against the pattern.
 *
 * Returns 0 if all channels verify.  Returns -EBUSY on any macro
 * timeout or read failure, setting *@resetp so the caller retries
 * after a PHY reset; a pure data mismatch also returns -EBUSY (after
 * poking DSP register 0x000b) but does NOT request a reset.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Address this channel's block and write the pattern. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                /* Execute the write macro. */
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Re-address the block and rewind for readback. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Start the read macro. */
                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Verify the six words pairwise (low/high). */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
1586
/* Clear the DSP test pattern in all four PHY channels by writing six
 * zero words to each channel and running the write macro.
 *
 * Returns 0 on success or -EBUSY if the macro times out on any
 * channel.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}
1606
1607 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1608 {
1609         u32 reg32, phy9_orig;
1610         int retries, do_phy_reset, err;
1611
1612         retries = 10;
1613         do_phy_reset = 1;
1614         do {
1615                 if (do_phy_reset) {
1616                         err = tg3_bmcr_reset(tp);
1617                         if (err)
1618                                 return err;
1619                         do_phy_reset = 0;
1620                 }
1621
1622                 /* Disable transmitter and interrupt.  */
1623                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1624                         continue;
1625
1626                 reg32 |= 0x3000;
1627                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1628
1629                 /* Set full-duplex, 1000 mbps.  */
1630                 tg3_writephy(tp, MII_BMCR,
1631                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1632
1633                 /* Set to master mode.  */
1634                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1635                         continue;
1636
1637                 tg3_writephy(tp, MII_TG3_CTRL,
1638                              (MII_TG3_CTRL_AS_MASTER |
1639                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1640
1641                 /* Enable SM_DSP_CLOCK and 6dB.  */
1642                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1643
1644                 /* Block the PHY control access.  */
1645                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1646                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1647
1648                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1649                 if (!err)
1650                         break;
1651         } while (--retries);
1652
1653         err = tg3_phy_reset_chanpat(tp);
1654         if (err)
1655                 return err;
1656
1657         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1658         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1659
1660         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1661         tg3_writephy(tp, 0x16, 0x0000);
1662
1663         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1664             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1665                 /* Set Extended packet length bit for jumbo frames */
1666                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1667         }
1668         else {
1669                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1670         }
1671
1672         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1673
1674         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1675                 reg32 &= ~0x3000;
1676                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1677         } else if (!err)
1678                 err = -EBUSY;
1679
1680         return err;
1681 }
1682
/* Fully reset the on-chip PHY and re-apply all chip-specific workarounds.
 *
 * Verifies the PHY is reachable (two MII_BMSR reads — presumably
 * back-to-back to flush latched status bits; TODO confirm), reports
 * carrier loss to the stack if the link was up, performs the reset
 * (special sequence on 5703/5704/5705), then replays the per-chip DSP
 * and AUX_CTRL fixups selected by the tg3_flags2 PHY bug bits.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Take the internal EPHY out of IDDQ (power-down) before
		 * touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* If either BMSR read fails, the PHY is unreachable — bail out. */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop the link; tell the stack now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* These chips need the DSP test-pattern reset sequence instead of
	 * a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX revs), temporarily clear the 10MB-RX-only CPMU
	 * mode around the reset; restored below.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* Undo the 12.5 MHz MAC clock setting applied at power-down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	/* Per-PHY-erratum DSP fixups; register values come from the
	 * vendor-supplied workaround sequences.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* The double write is intentional (5704 A0 workaround). */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1828
/* Sequence the GRC local-control GPIOs that switch the board's auxiliary
 * power (see the "Switch out of Vaux" handling in tg3_set_power_state()).
 *
 * On dual-port 5704/5714 devices the two ports share this circuitry, so
 * the peer port's WOL/ASF configuration is consulted as well: aux power
 * is driven on if either port needs it, and the port that finished
 * initialization first defers to its peer when turning things off.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* LOM designs don't wire these GPIOs; only real NICs do. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Aux power must stay available if WOL or ASF is enabled on
	 * either port.
	 */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			/* 5700/5701: single write drives all needed GPIOs. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Let the peer handle the GPIOs if it got there
			 * first.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			/* Staged GPIO transitions, 100 us apart. */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Nobody needs aux power: sequence it off (not needed on
		 * 5700/5701).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1939
1940 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1941 {
1942         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1943                 return 1;
1944         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1945                 if (speed != SPEED_10)
1946                         return 1;
1947         } else if (speed == SPEED_10)
1948                 return 1;
1949
1950         return 0;
1951 }
1952
1953 static int tg3_setup_phy(struct tg3 *, int);
1954
1955 #define RESET_KIND_SHUTDOWN     0
1956 #define RESET_KIND_INIT         1
1957 #define RESET_KIND_SUSPEND      2
1958
1959 static void tg3_write_sig_post_reset(struct tg3 *, int);
1960 static int tg3_halt_cpu(struct tg3 *, u32);
1961 static int tg3_nvram_lock(struct tg3 *);
1962 static void tg3_nvram_unlock(struct tg3 *);
1963
/* Put the PHY (or SERDES block) into its lowest-power state before the
 * chip is suspended.  Several revisions need special sequences, and a
 * few must not be powered down at all because of hardware bugs.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		/* Fiber: only the 5704 SERDES needs quiescing. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Hold the SERDES digital block in soft reset with
			 * HW autoneg selected.
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			/* Bit 15 of MAC_SERDES_CFG — meaning not visible
			 * here; presumably a power-down control.  TODO
			 * confirm against the 5704 datasheet.
			 */
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 EPHY: reset, then park it in IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		/* Legacy (non-phylib) path: force the LEDs off. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Drop the gigabit MAC clock to 12.5 MHz while powered
		 * down; tg3_phy_reset() undoes this on resume.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	/* Standard MII power-down bit. */
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2011
/* Transition the device into the requested PCI power state.
 *
 * For D0 this just wakes the chip and switches it off auxiliary power.
 * For the sleep states (D1/D2/D3hot) it: masks PCI interrupts, drops
 * the PHY to a WOL-appropriate link speed (via phylib or the legacy
 * path), arms Wake-on-LAN magic-packet reception when enabled, gates or
 * slows the chip clocks per chip family, powers down the PHY when
 * nothing (WOL/ASF/APE) still needs it, hands power over to Vaux, and
 * finally enters the requested PCI state.
 *
 * Returns 0 on success, -EINVAL for an unsupported power state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	switch (state) {
	case PCI_D0:
		pci_enable_wake(tp->pdev, state, false);
		pci_set_power_state(tp->pdev, PCI_D0);

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		break;

	default:
		printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
			tp->dev->name, state);
		return -EINVAL;
	}
	/* Mask PCI interrupts while the chip goes to sleep. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		/* phylib-managed PHY: save the current link settings (for
		 * restore on resume) and restrict the advertisement to
		 * what WOL needs.
		 */
		if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
		    !tp->link_config.phy_is_low_power) {
			struct phy_device *phydev;
			u32 advertising;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			tp->link_config.phy_is_low_power = 1;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			/* Baseline: 10 Mb half-duplex only. */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
			    (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);
		}
	} else {
		/* Legacy path: save settings and force a slow autoneg
		 * link (copper only).
		 */
		if (tp->link_config.phy_is_low_power == 0) {
			tp->link_config.phy_is_low_power = 1;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* 5906: the internal VCPU handles WOL; disable it here. */
		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Poll (up to ~200 ms) for the firmware status mailbox —
		 * presumably waiting for bootcode to quiesce; TODO
		 * confirm the handshake semantics.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell the firmware we are shutting down with WOL armed. */
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		/* Keep the MAC receiving so magic packets can be seen. */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* NOTE(review): TG3_FLAG_WOL_ENABLE is already known set
		 * in this branch; the re-check below is redundant.
		 */
		if (pci_pme_capable(tp->pdev, state) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
			mac_mode |= tp->mac_mode &
				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
			if (mac_mode & MAC_MODE_APE_TX_EN)
				mac_mode |= MAC_MODE_TDE_ENABLE;
		}

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock management: family-dependent gating/slow-down. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step write, 40 us settle each. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY down if nothing still needs the link. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock. */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
		pci_enable_wake(tp->pdev, state, true);

	/* Finally, set the new power state. */
	pci_set_power_state(tp->pdev, state);

	return 0;
}
2261
2262 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2263 {
2264         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2265         case MII_TG3_AUX_STAT_10HALF:
2266                 *speed = SPEED_10;
2267                 *duplex = DUPLEX_HALF;
2268                 break;
2269
2270         case MII_TG3_AUX_STAT_10FULL:
2271                 *speed = SPEED_10;
2272                 *duplex = DUPLEX_FULL;
2273                 break;
2274
2275         case MII_TG3_AUX_STAT_100HALF:
2276                 *speed = SPEED_100;
2277                 *duplex = DUPLEX_HALF;
2278                 break;
2279
2280         case MII_TG3_AUX_STAT_100FULL:
2281                 *speed = SPEED_100;
2282                 *duplex = DUPLEX_FULL;
2283                 break;
2284
2285         case MII_TG3_AUX_STAT_1000HALF:
2286                 *speed = SPEED_1000;
2287                 *duplex = DUPLEX_HALF;
2288                 break;
2289
2290         case MII_TG3_AUX_STAT_1000FULL:
2291                 *speed = SPEED_1000;
2292                 *duplex = DUPLEX_FULL;
2293                 break;
2294
2295         default:
2296                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2297                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2298                                  SPEED_10;
2299                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2300                                   DUPLEX_HALF;
2301                         break;
2302                 }
2303                 *speed = SPEED_INVALID;
2304                 *duplex = DUPLEX_INVALID;
2305                 break;
2306         }
2307 }
2308
/* tg3_phy_copper_begin - program a copper PHY from tp->link_config.
 *
 * Writes the MII advertisement registers according to the requested
 * modes (or a restricted 10/100 set when entering low-power mode) and
 * then either forces the configured speed/duplex in MII_BMCR or
 * (re)starts autonegotiation.
 */
2309 static void tg3_phy_copper_begin(struct tg3 *tp)
2310 {
2311         u32 new_adv;
2312         int i;
2313
2314         if (tp->link_config.phy_is_low_power) {
2315                 /* Entering low power mode.  Disable gigabit and
2316                  * 100baseT advertisements.
2317                  */
2318                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2319
2320                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2321                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2322                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2323                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2324
2325                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2326         } else if (tp->link_config.speed == SPEED_INVALID) {
                /* No specific speed requested: advertise everything
                 * enabled in link_config.advertising (restricted to
                 * 10/100 modes on 10/100-only hardware).
                 */
2327                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2328                         tp->link_config.advertising &=
2329                                 ~(ADVERTISED_1000baseT_Half |
2330                                   ADVERTISED_1000baseT_Full);
2331
2332                 new_adv = ADVERTISE_CSMA;
2333                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2334                         new_adv |= ADVERTISE_10HALF;
2335                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2336                         new_adv |= ADVERTISE_10FULL;
2337                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2338                         new_adv |= ADVERTISE_100HALF;
2339                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2340                         new_adv |= ADVERTISE_100FULL;
2341
2342                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2343
2344                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2345
2346                 if (tp->link_config.advertising &
2347                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2348                         new_adv = 0;
2349                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2350                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2351                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2352                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
                        /* 5701 A0/B0 additionally request master mode. */
2353                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2354                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2355                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2356                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2357                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2358                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2359                 } else {
2360                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2361                 }
2362         } else {
2363                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2364                 new_adv |= ADVERTISE_CSMA;
2365
2366                 /* Asking for a specific link mode. */
2367                 if (tp->link_config.speed == SPEED_1000) {
2368                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2369
2370                         if (tp->link_config.duplex == DUPLEX_FULL)
2371                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2372                         else
2373                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2374                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2375                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2376                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2377                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2378                 } else {
2379                         if (tp->link_config.speed == SPEED_100) {
2380                                 if (tp->link_config.duplex == DUPLEX_FULL)
2381                                         new_adv |= ADVERTISE_100FULL;
2382                                 else
2383                                         new_adv |= ADVERTISE_100HALF;
2384                         } else {
2385                                 if (tp->link_config.duplex == DUPLEX_FULL)
2386                                         new_adv |= ADVERTISE_10FULL;
2387                                 else
2388                                         new_adv |= ADVERTISE_10HALF;
2389                         }
2390                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2391
2392                         new_adv = 0;
2393                 }
2394
2395                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2396         }
2397
2398         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2399             tp->link_config.speed != SPEED_INVALID) {
2400                 u32 bmcr, orig_bmcr;
2401
2402                 tp->link_config.active_speed = tp->link_config.speed;
2403                 tp->link_config.active_duplex = tp->link_config.duplex;
2404
2405                 bmcr = 0;
2406                 switch (tp->link_config.speed) {
2407                 default:
2408                 case SPEED_10:
2409                         break;
2410
2411                 case SPEED_100:
2412                         bmcr |= BMCR_SPEED100;
2413                         break;
2414
2415                 case SPEED_1000:
2416                         bmcr |= TG3_BMCR_SPEED1000;
2417                         break;
2418                 }
2419
2420                 if (tp->link_config.duplex == DUPLEX_FULL)
2421                         bmcr |= BMCR_FULLDPLX;
2422
2423                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2424                     (bmcr != orig_bmcr)) {
                        /* Take the link down via loopback and wait up to
                         * ~15ms for BMSR to report link-down before
                         * programming the new speed/duplex.  BMSR is
                         * latched, hence the double read per iteration.
                         */
2425                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2426                         for (i = 0; i < 1500; i++) {
2427                                 u32 tmp;
2428
2429                                 udelay(10);
2430                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2431                                     tg3_readphy(tp, MII_BMSR, &tmp))
2432                                         continue;
2433                                 if (!(tmp & BMSR_LSTATUS)) {
2434                                         udelay(40);
2435                                         break;
2436                                 }
2437                         }
2438                         tg3_writephy(tp, MII_BMCR, bmcr);
2439                         udelay(40);
2440                 }
2441         } else {
                /* Kick off (or restart) autonegotiation. */
2442                 tg3_writephy(tp, MII_BMCR,
2443                              BMCR_ANENABLE | BMCR_ANRESTART);
2444         }
2445 }
2446
2447 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2448 {
2449         int err;
2450
2451         /* Turn off tap power management. */
2452         /* Set Extended packet length bit */
2453         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2454
2455         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2456         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2457
2458         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2459         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2460
2461         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2462         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2463
2464         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2465         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2466
2467         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2468         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2469
2470         udelay(40);
2471
2472         return err;
2473 }
2474
2475 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2476 {
2477         u32 adv_reg, all_mask = 0;
2478
2479         if (mask & ADVERTISED_10baseT_Half)
2480                 all_mask |= ADVERTISE_10HALF;
2481         if (mask & ADVERTISED_10baseT_Full)
2482                 all_mask |= ADVERTISE_10FULL;
2483         if (mask & ADVERTISED_100baseT_Half)
2484                 all_mask |= ADVERTISE_100HALF;
2485         if (mask & ADVERTISED_100baseT_Full)
2486                 all_mask |= ADVERTISE_100FULL;
2487
2488         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2489                 return 0;
2490
2491         if ((adv_reg & all_mask) != all_mask)
2492                 return 0;
2493         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2494                 u32 tg3_ctrl;
2495
2496                 all_mask = 0;
2497                 if (mask & ADVERTISED_1000baseT_Half)
2498                         all_mask |= ADVERTISE_1000HALF;
2499                 if (mask & ADVERTISED_1000baseT_Full)
2500                         all_mask |= ADVERTISE_1000FULL;
2501
2502                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2503                         return 0;
2504
2505                 if ((tg3_ctrl & all_mask) != all_mask)
2506                         return 0;
2507         }
2508         return 1;
2509 }
2510
2511 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2512 {
2513         u32 curadv, reqadv;
2514
2515         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2516                 return 1;
2517
2518         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2519         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2520
2521         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2522                 if (curadv != reqadv)
2523                         return 0;
2524
2525                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2526                         tg3_readphy(tp, MII_LPA, rmtadv);
2527         } else {
2528                 /* Reprogram the advertisement register, even if it
2529                  * does not affect the current link.  If the link
2530                  * gets renegotiated in the future, we can save an
2531                  * additional renegotiation cycle by advertising
2532                  * it correctly in the first place.
2533                  */
2534                 if (curadv != reqadv) {
2535                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2536                                      ADVERTISE_PAUSE_ASYM);
2537                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2538                 }
2539         }
2540
2541         return 1;
2542 }
2543
/* tg3_setup_copper_phy - (re)establish the link on a copper PHY.
 *
 * Clears latched MAC status bits, applies chip-specific PHY
 * workarounds (5401 DSP init, 5701 A0/B0 CRC workaround), optionally
 * resets the PHY (forced for some 5703/5704/5705 boards when the link
 * has dropped), polls BMSR for link, derives the active speed/duplex
 * from the TG3 AUX status register, validates the result against the
 * requested configuration (or the advertisement when autoneg is on),
 * then programs MAC_MODE and the netif carrier state to match.
 * Returns 0, or a PHY-init error from the BCM5401 DSP setup path.
 */
2544 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2545 {
2546         int current_link_up;
2547         u32 bmsr, dummy;
2548         u32 lcl_adv, rmt_adv;
2549         u16 current_speed;
2550         u8 current_duplex;
2551         int i, err;
2552
2553         tw32(MAC_EVENT, 0);
2554
2555         tw32_f(MAC_STATUS,
2556              (MAC_STATUS_SYNC_CHANGED |
2557               MAC_STATUS_CFG_CHANGED |
2558               MAC_STATUS_MI_COMPLETION |
2559               MAC_STATUS_LNKSTATE_CHANGED));
2560         udelay(40);
2561
2562         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                /* Turn off MI auto-polling while we access the PHY
                 * registers directly below.
                 */
2563                 tw32_f(MAC_MI_MODE,
2564                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2565                 udelay(80);
2566         }
2567
2568         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2569
2570         /* Some third-party PHYs need to be reset on link going
2571          * down.
2572          */
2573         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2574              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2575              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2576             netif_carrier_ok(tp->dev)) {
2577                 tg3_readphy(tp, MII_BMSR, &bmsr);
2578                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2579                     !(bmsr & BMSR_LSTATUS))
2580                         force_reset = 1;
2581         }
2582         if (force_reset)
2583                 tg3_phy_reset(tp);
2584
2585         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2586                 tg3_readphy(tp, MII_BMSR, &bmsr);
2587                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2588                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2589                         bmsr = 0;
2590
2591                 if (!(bmsr & BMSR_LSTATUS)) {
2592                         err = tg3_init_5401phy_dsp(tp);
2593                         if (err)
2594                                 return err;
2595
                        /* Wait up to ~10ms for the link to come back
                         * after the DSP re-init.
                         */
2596                         tg3_readphy(tp, MII_BMSR, &bmsr);
2597                         for (i = 0; i < 1000; i++) {
2598                                 udelay(10);
2599                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2600                                     (bmsr & BMSR_LSTATUS)) {
2601                                         udelay(40);
2602                                         break;
2603                                 }
2604                         }
2605
                        /* 5401 B0 at gigabit may need a full PHY reset
                         * plus another DSP init to recover the link.
                         */
2606                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2607                             !(bmsr & BMSR_LSTATUS) &&
2608                             tp->link_config.active_speed == SPEED_1000) {
2609                                 err = tg3_phy_reset(tp);
2610                                 if (!err)
2611                                         err = tg3_init_5401phy_dsp(tp);
2612                                 if (err)
2613                                         return err;
2614                         }
2615                 }
2616         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2617                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2618                 /* 5701 {A0,B0} CRC bug workaround */
2619                 tg3_writephy(tp, 0x15, 0x0a75);
2620                 tg3_writephy(tp, 0x1c, 0x8c68);
2621                 tg3_writephy(tp, 0x1c, 0x8d68);
2622                 tg3_writephy(tp, 0x1c, 0x8c68);
2623         }
2624
2625         /* Clear pending interrupts... */
2626         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2627         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2628
2629         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2630                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)
2631         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2632                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2633
2634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2635             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2636                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2637                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2638                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2639                 else
2640                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2641         }
2642
2643         current_link_up = 0;
2644         current_speed = SPEED_INVALID;
2645         current_duplex = DUPLEX_INVALID;
2646
2647         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2648                 u32 val;
2649
2650                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2651                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2652                 if (!(val & (1 << 10))) {
2653                         val |= (1 << 10);
2654                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2655                         goto relink;
2656                 }
2657         }
2658
        /* Poll up to ~4ms for link; BMSR latches link-down, so it is
         * read twice per iteration to get the current status.
         */
2659         bmsr = 0;
2660         for (i = 0; i < 100; i++) {
2661                 tg3_readphy(tp, MII_BMSR, &bmsr);
2662                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2663                     (bmsr & BMSR_LSTATUS))
2664                         break;
2665                 udelay(40);
2666         }
2667
2668         if (bmsr & BMSR_LSTATUS) {
2669                 u32 aux_stat, bmcr;
2670
                /* Wait for a non-zero AUX status to decode speed/duplex. */
2671                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2672                 for (i = 0; i < 2000; i++) {
2673                         udelay(10);
2674                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2675                             aux_stat)
2676                                 break;
2677                 }
2678
2679                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2680                                              &current_speed,
2681                                              &current_duplex);
2682
                /* Wait for a stable BMCR value (0 and 0x7fff reads are
                 * discarded as not-ready).
                 */
2683                 bmcr = 0;
2684                 for (i = 0; i < 200; i++) {
2685                         tg3_readphy(tp, MII_BMCR, &bmcr);
2686                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2687                                 continue;
2688                         if (bmcr && bmcr != 0x7fff)
2689                                 break;
2690                         udelay(10);
2691                 }
2692
2693                 lcl_adv = 0;
2694                 rmt_adv = 0;
2695
2696                 tp->link_config.active_speed = current_speed;
2697                 tp->link_config.active_duplex = current_duplex;
2698
2699                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        /* Autoneg: the link counts as up only when the
                         * PHY is advertising everything we want,
                         * including the correct pause bits.
                         */
2700                         if ((bmcr & BMCR_ANENABLE) &&
2701                             tg3_copper_is_advertising_all(tp,
2702                                                 tp->link_config.advertising)) {
2703                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2704                                                                   &rmt_adv))
2705                                         current_link_up = 1;
2706                         }
2707                 } else {
                        /* Forced mode: the negotiated result must match
                         * the requested speed/duplex/flow-control.
                         */
2708                         if (!(bmcr & BMCR_ANENABLE) &&
2709                             tp->link_config.speed == current_speed &&
2710                             tp->link_config.duplex == current_duplex &&
2711                             tp->link_config.flowctrl ==
2712                             tp->link_config.active_flowctrl) {
2713                                 current_link_up = 1;
2714                         }
2715                 }
2716
2717                 if (current_link_up == 1 &&
2718                     tp->link_config.active_duplex == DUPLEX_FULL)
2719                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2720         }
2721
2722 relink:
2723         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2724                 u32 tmp;
2725
2726                 tg3_phy_copper_begin(tp);
2727
2728                 tg3_readphy(tp, MII_BMSR, &tmp);
2729                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2730                     (tmp & BMSR_LSTATUS))
2731                         current_link_up = 1;
2732         }
2733
        /* Program the MAC port mode: MII for 10/100, GMII otherwise. */
2734         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2735         if (current_link_up == 1) {
2736                 if (tp->link_config.active_speed == SPEED_100 ||
2737                     tp->link_config.active_speed == SPEED_10)
2738                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2739                 else
2740                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2741         } else
2742                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2743
2744         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2745         if (tp->link_config.active_duplex == DUPLEX_HALF)
2746                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2747
2748         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2749                 if (current_link_up == 1 &&
2750                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2751                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2752                 else
2753                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2754         }
2755
2756         /* ??? Without this setting Netgear GA302T PHY does not
2757          * ??? send/receive packets...
2758          */
2759         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2760             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2761                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2762                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2763                 udelay(80);
2764         }
2765
2766         tw32_f(MAC_MODE, tp->mac_mode);
2767         udelay(40);
2768
2769         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2770                 /* Polled via timer. */
2771                 tw32_f(MAC_EVENT, 0);
2772         } else {
2773                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2774         }
2775         udelay(40);
2776
        /* 5700 at gigabit on PCI-X / high-speed PCI: re-clear the
         * latched status bits and post MAGIC2 to the firmware mailbox.
         */
2777         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2778             current_link_up == 1 &&
2779             tp->link_config.active_speed == SPEED_1000 &&
2780             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2781              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2782                 udelay(120);
2783                 tw32_f(MAC_STATUS,
2784                      (MAC_STATUS_SYNC_CHANGED |
2785                       MAC_STATUS_CFG_CHANGED));
2786                 udelay(40);
2787                 tg3_write_mem(tp,
2788                               NIC_SRAM_FIRMWARE_MBOX,
2789                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2790         }
2791
        /* Reflect the new link state in the net_device carrier flag. */
2792         if (current_link_up != netif_carrier_ok(tp->dev)) {
2793                 if (current_link_up)
2794                         netif_carrier_on(tp->dev);
2795                 else
2796                         netif_carrier_off(tp->dev);
2797                 tg3_link_report(tp);
2798         }
2799
2800         return 0;
2801 }
2802
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().  The MR_* flags follow
 * clause-37-style management-register naming; the ANEG_CFG_* values
 * are bit positions within the tx/rx config words exchanged with the
 * link partner.
 */
2803 struct tg3_fiber_aneginfo {
2804         int state;  /* current ANEG_STATE_* */
2805 #define ANEG_STATE_UNKNOWN              0
2806 #define ANEG_STATE_AN_ENABLE            1
2807 #define ANEG_STATE_RESTART_INIT         2
2808 #define ANEG_STATE_RESTART              3
2809 #define ANEG_STATE_DISABLE_LINK_OK      4
2810 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2811 #define ANEG_STATE_ABILITY_DETECT       6
2812 #define ANEG_STATE_ACK_DETECT_INIT      7
2813 #define ANEG_STATE_ACK_DETECT           8
2814 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2815 #define ANEG_STATE_COMPLETE_ACK         10
2816 #define ANEG_STATE_IDLE_DETECT_INIT     11
2817 #define ANEG_STATE_IDLE_DETECT          12
2818 #define ANEG_STATE_LINK_OK              13
2819 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2820 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2821
2822         u32 flags;  /* MR_* status/ability bits below */
2823 #define MR_AN_ENABLE            0x00000001
2824 #define MR_RESTART_AN           0x00000002
2825 #define MR_AN_COMPLETE          0x00000004
2826 #define MR_PAGE_RX              0x00000008
2827 #define MR_NP_LOADED            0x00000010
2828 #define MR_TOGGLE_TX            0x00000020
2829 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2830 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2831 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2832 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2833 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2834 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2835 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2836 #define MR_TOGGLE_RX            0x00002000
2837 #define MR_NP_RX                0x00004000
2838
2839 #define MR_LINK_OK              0x80000000
2840
        /* Tick counters; cur_time advances once per state-machine call,
         * link_time records when the current settle window began.
         */
2841         unsigned long link_time, cur_time;
2842
2843         u32 ability_match_cfg;    /* last rx config word compared for stability */
2844         int ability_match_count;  /* consecutive ticks rxconfig was unchanged */
2845
2846         char ability_match, idle_match, ack_match;
2847
2848         u32 txconfig, rxconfig;  /* config words sent to / received from partner */
2849 #define ANEG_CFG_NP             0x00000080
2850 #define ANEG_CFG_ACK            0x00000040
2851 #define ANEG_CFG_RF2            0x00000020
2852 #define ANEG_CFG_RF1            0x00000010
2853 #define ANEG_CFG_PS2            0x00000001
2854 #define ANEG_CFG_PS1            0x00008000
2855 #define ANEG_CFG_HD             0x00004000
2856 #define ANEG_CFG_FD             0x00002000
2857 #define ANEG_CFG_INVAL          0x00001f06
2858
2859 };
/* Return codes of tg3_fiber_aneg_smachine(). */
2860 #define ANEG_OK         0  /* keep stepping the state machine */
2861 #define ANEG_DONE       1  /* negotiation complete */
2862 #define ANEG_TIMER_ENAB 2  /* keep stepping; a timed settle window is active */
2863 #define ANEG_FAILED     -1 /* negotiation failed */
2864
2865 #define ANEG_STATE_SETTLE_TIME  10000  /* ticks (~10ms at 1us per tick in fiber_autoneg) */
2866
/* tg3_fiber_aneg_smachine - advance the fiber (1000BASE-X) software
 * autonegotiation state machine by one tick.
 *
 * Samples the received config word from MAC_RX_AUTO_NEG, tracks how
 * long it has been stable (ability_match / ack_match / idle_match),
 * steps ap->state accordingly, and writes our tx config word to
 * MAC_TX_AUTO_NEG where the protocol requires it.
 *
 * Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB while inside a
 * timed settle window, ANEG_DONE when negotiation has finished, or
 * ANEG_FAILED on error.
 */
2867 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2868                                    struct tg3_fiber_aneginfo *ap)
2869 {
2870         u16 flowctrl;
2871         unsigned long delta;
2872         u32 rx_cfg_reg;
2873         int ret;
2874
2875         if (ap->state == ANEG_STATE_UNKNOWN) {
2876                 ap->rxconfig = 0;
2877                 ap->link_time = 0;
2878                 ap->cur_time = 0;
2879                 ap->ability_match_cfg = 0;
2880                 ap->ability_match_count = 0;
2881                 ap->ability_match = 0;
2882                 ap->idle_match = 0;
2883                 ap->ack_match = 0;
2884         }
2885         ap->cur_time++;
2886
        /* Sample the received config word; ability_match is set once
         * the same word has been seen on two consecutive ticks.
         */
2887         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2888                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2889
2890                 if (rx_cfg_reg != ap->ability_match_cfg) {
2891                         ap->ability_match_cfg = rx_cfg_reg;
2892                         ap->ability_match = 0;
2893                         ap->ability_match_count = 0;
2894                 } else {
2895                         if (++ap->ability_match_count > 1) {
2896                                 ap->ability_match = 1;
2897                                 ap->ability_match_cfg = rx_cfg_reg;
2898                         }
2899                 }
2900                 if (rx_cfg_reg & ANEG_CFG_ACK)
2901                         ap->ack_match = 1;
2902                 else
2903                         ap->ack_match = 0;
2904
2905                 ap->idle_match = 0;
2906         } else {
2907                 ap->idle_match = 1;
2908                 ap->ability_match_cfg = 0;
2909                 ap->ability_match_count = 0;
2910                 ap->ability_match = 0;
2911                 ap->ack_match = 0;
2912
2913                 rx_cfg_reg = 0;
2914         }
2915
2916         ap->rxconfig = rx_cfg_reg;
2917         ret = ANEG_OK;
2918
2919         switch(ap->state) {
2920         case ANEG_STATE_UNKNOWN:
2921                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2922                         ap->state = ANEG_STATE_AN_ENABLE;
2923
2924                 /* fallthru */
2925         case ANEG_STATE_AN_ENABLE:
2926                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2927                 if (ap->flags & MR_AN_ENABLE) {
2928                         ap->link_time = 0;
2929                         ap->cur_time = 0;
2930                         ap->ability_match_cfg = 0;
2931                         ap->ability_match_count = 0;
2932                         ap->ability_match = 0;
2933                         ap->idle_match = 0;
2934                         ap->ack_match = 0;
2935
2936                         ap->state = ANEG_STATE_RESTART_INIT;
2937                 } else {
2938                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2939                 }
2940                 break;
2941
2942         case ANEG_STATE_RESTART_INIT:
2943                 ap->link_time = ap->cur_time;
2944                 ap->flags &= ~(MR_NP_LOADED);
2945                 ap->txconfig = 0;
2946                 tw32(MAC_TX_AUTO_NEG, 0);
2947                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2948                 tw32_f(MAC_MODE, tp->mac_mode);
2949                 udelay(40);
2950
2951                 ret = ANEG_TIMER_ENAB;
2952                 ap->state = ANEG_STATE_RESTART;
2953
2954                 /* fallthru */
2955         case ANEG_STATE_RESTART:
                /* Hold in restart until the settle time has elapsed. */
2956                 delta = ap->cur_time - ap->link_time;
2957                 if (delta > ANEG_STATE_SETTLE_TIME) {
2958                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2959                 } else {
2960                         ret = ANEG_TIMER_ENAB;
2961                 }
2962                 break;
2963
2964         case ANEG_STATE_DISABLE_LINK_OK:
2965                 ret = ANEG_DONE;
2966                 break;
2967
2968         case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Start transmitting our ability word: full duplex plus
                 * the pause bits derived from link_config.flowctrl.
                 */
2969                 ap->flags &= ~(MR_TOGGLE_TX);
2970                 ap->txconfig = ANEG_CFG_FD;
2971                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2972                 if (flowctrl & ADVERTISE_1000XPAUSE)
2973                         ap->txconfig |= ANEG_CFG_PS1;
2974                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2975                         ap->txconfig |= ANEG_CFG_PS2;
2976                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2977                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2978                 tw32_f(MAC_MODE, tp->mac_mode);
2979                 udelay(40);
2980
2981                 ap->state = ANEG_STATE_ABILITY_DETECT;
2982                 break;
2983
2984         case ANEG_STATE_ABILITY_DETECT:
2985                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2986                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2987                 }
2988                 break;
2989
2990         case ANEG_STATE_ACK_DETECT_INIT:
2991                 ap->txconfig |= ANEG_CFG_ACK;
2992                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2993                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2994                 tw32_f(MAC_MODE, tp->mac_mode);
2995                 udelay(40);
2996
2997                 ap->state = ANEG_STATE_ACK_DETECT;
2998
2999                 /* fallthru */
3000         case ANEG_STATE_ACK_DETECT:
3001                 if (ap->ack_match != 0) {
3002                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3003                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3004                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3005                         } else {
3006                                 ap->state = ANEG_STATE_AN_ENABLE;
3007                         }
3008                 } else if (ap->ability_match != 0 &&
3009                            ap->rxconfig == 0) {
3010                         ap->state = ANEG_STATE_AN_ENABLE;
3011                 }
3012                 break;
3013
3014         case ANEG_STATE_COMPLETE_ACK_INIT:
3015                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3016                         ret = ANEG_FAILED;
3017                         break;
3018                 }
                /* Decode the link partner's ability word into MR_* flags. */
3019                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3020                                MR_LP_ADV_HALF_DUPLEX |
3021                                MR_LP_ADV_SYM_PAUSE |
3022                                MR_LP_ADV_ASYM_PAUSE |
3023                                MR_LP_ADV_REMOTE_FAULT1 |
3024                                MR_LP_ADV_REMOTE_FAULT2 |
3025                                MR_LP_ADV_NEXT_PAGE |
3026                                MR_TOGGLE_RX |
3027                                MR_NP_RX);
3028                 if (ap->rxconfig & ANEG_CFG_FD)
3029                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3030                 if (ap->rxconfig & ANEG_CFG_HD)
3031                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3032                 if (ap->rxconfig & ANEG_CFG_PS1)
3033                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3034                 if (ap->rxconfig & ANEG_CFG_PS2)
3035                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3036                 if (ap->rxconfig & ANEG_CFG_RF1)
3037                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3038                 if (ap->rxconfig & ANEG_CFG_RF2)
3039                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3040                 if (ap->rxconfig & ANEG_CFG_NP)
3041                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3042
3043                 ap->link_time = ap->cur_time;
3044
3045                 ap->flags ^= (MR_TOGGLE_TX);
3046                 if (ap->rxconfig & 0x0008)  /* NOTE(review): bit 3 looks like the clause-37 toggle bit - confirm */
3047                         ap->flags |= MR_TOGGLE_RX;
3048                 if (ap->rxconfig & ANEG_CFG_NP)
3049                         ap->flags |= MR_NP_RX;
3050                 ap->flags |= MR_PAGE_RX;
3051
3052                 ap->state = ANEG_STATE_COMPLETE_ACK;
3053                 ret = ANEG_TIMER_ENAB;
3054                 break;
3055
3056         case ANEG_STATE_COMPLETE_ACK:
3057                 if (ap->ability_match != 0 &&
3058                     ap->rxconfig == 0) {
3059                         ap->state = ANEG_STATE_AN_ENABLE;
3060                         break;
3061                 }
3062                 delta = ap->cur_time - ap->link_time;
3063                 if (delta > ANEG_STATE_SETTLE_TIME) {
3064                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3065                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3066                         } else {
3067                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3068                                     !(ap->flags & MR_NP_RX)) {
3069                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3070                                 } else {
3071                                         ret = ANEG_FAILED;
3072                                 }
3073                         }
3074                 }
3075                 break;
3076
3077         case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words and wait for idle. */
3078                 ap->link_time = ap->cur_time;
3079                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3080                 tw32_f(MAC_MODE, tp->mac_mode);
3081                 udelay(40);
3082
3083                 ap->state = ANEG_STATE_IDLE_DETECT;
3084                 ret = ANEG_TIMER_ENAB;
3085                 break;
3086
3087         case ANEG_STATE_IDLE_DETECT:
3088                 if (ap->ability_match != 0 &&
3089                     ap->rxconfig == 0) {
3090                         ap->state = ANEG_STATE_AN_ENABLE;
3091                         break;
3092                 }
3093                 delta = ap->cur_time - ap->link_time;
3094                 if (delta > ANEG_STATE_SETTLE_TIME) {
3095                         /* XXX another gem from the Broadcom driver :( */
3096                         ap->state = ANEG_STATE_LINK_OK;
3097                 }
3098                 break;
3099
3100         case ANEG_STATE_LINK_OK:
3101                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3102                 ret = ANEG_DONE;
3103                 break;
3104
3105         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3106                 /* ??? unimplemented */
3107                 break;
3108
3109         case ANEG_STATE_NEXT_PAGE_WAIT:
3110                 /* ??? unimplemented */
3111                 break;
3112
3113         default:
3114                 ret = ANEG_FAILED;
3115                 break;
3116         }
3117
3118         return ret;
3119 }
3120
3121 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3122 {
3123         int res = 0;
3124         struct tg3_fiber_aneginfo aninfo;
3125         int status = ANEG_FAILED;
3126         unsigned int tick;
3127         u32 tmp;
3128
3129         tw32_f(MAC_TX_AUTO_NEG, 0);
3130
3131         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3132         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3133         udelay(40);
3134
3135         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3136         udelay(40);
3137
3138         memset(&aninfo, 0, sizeof(aninfo));
3139         aninfo.flags |= MR_AN_ENABLE;
3140         aninfo.state = ANEG_STATE_UNKNOWN;
3141         aninfo.cur_time = 0;
3142         tick = 0;
3143         while (++tick < 195000) {
3144                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3145                 if (status == ANEG_DONE || status == ANEG_FAILED)
3146                         break;
3147
3148                 udelay(1);
3149         }
3150
3151         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3152         tw32_f(MAC_MODE, tp->mac_mode);
3153         udelay(40);
3154
3155         *txflags = aninfo.txconfig;
3156         *rxflags = aninfo.flags;
3157
3158         if (status == ANEG_DONE &&
3159             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3160                              MR_LP_ADV_FULL_DUPLEX)))
3161                 res = 1;
3162
3163         return res;
3164 }
3165
/* One-time hardware init sequence for the BCM8002 SerDes PHY.
 *
 * Performs a software reset followed by a series of vendor-specific
 * register writes (PLL lock range, auto-lock/comdet selection, POR
 * toggling), then deselects the channel register so the PHY ID can be
 * read later.  The raw register numbers/values below come from the
 * vendor init sequence and are not publicly documented.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset only when initting the first time or when we have a
         * link; otherwise leave the already-configured PHY alone.
         */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete (~5 ms busy-wait). */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize (~150 ms busy-wait). */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
3215
/* Fibre link setup using the hardware SG_DIG autoneg engine
 * (5704S-class parts).  Returns nonzero when the link is up.
 *
 * NOTE(review): the serdes_cfg magic values (0xc010000 / 0x4010000 /
 * 0xc011000) are taken from the vendor driver and are undocumented
 * here.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        /* All revisions other than 5704 A0/A1 need the MAC_SERDES_CFG
         * workaround applied below.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Autoneg disabled: tear down HW autoneg if it was
                 * active, then declare the link up as soon as PCS sync
                 * is present.
                 */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        /* Fold our pause advertisement into the expected control word. */
        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* While parallel-detected with PCS sync but no config
                 * words, let the serdes timer run down before forcing
                 * a (re)start of autoneg.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                /* Pulse soft reset while programming the new control
                 * word, then arm the autoneg timeout.
                 */
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        u32 local_adv = 0, remote_adv = 0;

                        /* Autoneg finished: derive the pause settings
                         * from what we advertised and what the partner
                         * reported.
                         */
                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out: drop back to
                                 * parallel detection.
                                 */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->tg3_flags2 |=
                                                TG3_FLG2_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* Neither sync nor signal detect: rearm the autoneg
                 * timer and clear parallel detection.
                 */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
3357
3358 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3359 {
3360         int current_link_up = 0;
3361
3362         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3363                 goto out;
3364
3365         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3366                 u32 txflags, rxflags;
3367                 int i;
3368
3369                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3370                         u32 local_adv = 0, remote_adv = 0;
3371
3372                         if (txflags & ANEG_CFG_PS1)
3373                                 local_adv |= ADVERTISE_1000XPAUSE;
3374                         if (txflags & ANEG_CFG_PS2)
3375                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3376
3377                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3378                                 remote_adv |= LPA_1000XPAUSE;
3379                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3380                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3381
3382                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3383
3384                         current_link_up = 1;
3385                 }
3386                 for (i = 0; i < 30; i++) {
3387                         udelay(20);
3388                         tw32_f(MAC_STATUS,
3389                                (MAC_STATUS_SYNC_CHANGED |
3390                                 MAC_STATUS_CFG_CHANGED));
3391                         udelay(40);
3392                         if ((tr32(MAC_STATUS) &
3393                              (MAC_STATUS_SYNC_CHANGED |
3394                               MAC_STATUS_CFG_CHANGED)) == 0)
3395                                 break;
3396                 }
3397
3398                 mac_status = tr32(MAC_STATUS);
3399                 if (current_link_up == 0 &&
3400                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3401                     !(mac_status & MAC_STATUS_RCVD_CFG))
3402                         current_link_up = 1;
3403         } else {
3404                 tg3_setup_flow_control(tp, 0, 0);
3405
3406                 /* Forcing 1000FD link up. */
3407                 current_link_up = 1;
3408
3409                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3410                 udelay(40);
3411
3412                 tw32_f(MAC_MODE, tp->mac_mode);
3413                 udelay(40);
3414         }
3415
3416 out:
3417         return current_link_up;
3418 }
3419
/* Top-level link setup for TBI (fibre) parts.
 *
 * Selects hardware or software autoneg, polls the MAC status until it
 * settles, then updates carrier state and the link LED, reporting any
 * link parameter change.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        /* Snapshot the current link parameters so only real changes
         * are reported at the end.
         */
        orig_pause_cfg = tp->link_config.active_flowctrl;
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        /* Fast path: with software autoneg, an already-up and fully
         * synced link needs nothing beyond acking the change bits.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
            (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Put the MAC into TBI port mode. */
        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        mac_status = tr32(MAC_STATUS);

        if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        /* Clear any stale link-change indication in the status block. */
        tp->hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

        /* Ack change events until the MAC status settles. */
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
                /* Autoneg timer expired with no sync: pulse
                 * SEND_CONFIGS once to nudge the link partner.
                 */
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        if (current_link_up == 1) {
                /* Fibre links are always gigabit full duplex. */
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_INVALID;
                tp->link_config.active_duplex = DUPLEX_INVALID;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        /* Propagate carrier state and report any parameter change. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        } else {
                u32 now_pause_cfg = tp->link_config.active_flowctrl;
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}
3527
3528 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3529 {
3530         int current_link_up, err = 0;
3531         u32 bmsr, bmcr;
3532         u16 current_speed;
3533         u8 current_duplex;
3534         u32 local_adv, remote_adv;
3535
3536         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3537         tw32_f(MAC_MODE, tp->mac_mode);
3538         udelay(40);
3539
3540         tw32(MAC_EVENT, 0);
3541
3542         tw32_f(MAC_STATUS,
3543              (MAC_STATUS_SYNC_CHANGED |
3544               MAC_STATUS_CFG_CHANGED |
3545               MAC_STATUS_MI_COMPLETION |
3546               MAC_STATUS_LNKSTATE_CHANGED));
3547         udelay(40);
3548
3549         if (force_reset)
3550                 tg3_phy_reset(tp);
3551
3552         current_link_up = 0;
3553         current_speed = SPEED_INVALID;
3554         current_duplex = DUPLEX_INVALID;
3555
3556         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3557         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3559                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3560                         bmsr |= BMSR_LSTATUS;
3561                 else
3562                         bmsr &= ~BMSR_LSTATUS;
3563         }
3564
3565         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3566
3567         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3568             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3569                 /* do nothing, just check for link up at the end */
3570         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3571                 u32 adv, new_adv;
3572
3573                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3574                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3575                                   ADVERTISE_1000XPAUSE |
3576                                   ADVERTISE_1000XPSE_ASYM |
3577                                   ADVERTISE_SLCT);
3578
3579                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3580
3581                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3582                         new_adv |= ADVERTISE_1000XHALF;
3583                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3584                         new_adv |= ADVERTISE_1000XFULL;
3585
3586                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3587                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3588                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3589                         tg3_writephy(tp, MII_BMCR, bmcr);
3590
3591                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3592                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3593                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3594
3595                         return err;
3596                 }
3597         } else {
3598                 u32 new_bmcr;
3599
3600                 bmcr &= ~BMCR_SPEED1000;
3601                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3602
3603                 if (tp->link_config.duplex == DUPLEX_FULL)
3604                         new_bmcr |= BMCR_FULLDPLX;
3605
3606                 if (new_bmcr != bmcr) {
3607                         /* BMCR_SPEED1000 is a reserved bit that needs
3608                          * to be set on write.
3609                          */
3610                         new_bmcr |= BMCR_SPEED1000;
3611
3612                         /* Force a linkdown */
3613                         if (netif_carrier_ok(tp->dev)) {
3614                                 u32 adv;
3615
3616                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3617                                 adv &= ~(ADVERTISE_1000XFULL |
3618                                          ADVERTISE_1000XHALF |
3619                                          ADVERTISE_SLCT);
3620                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3621                                 tg3_writephy(tp, MII_BMCR, bmcr |
3622                                                            BMCR_ANRESTART |
3623                                                            BMCR_ANENABLE);
3624                                 udelay(10);
3625                                 netif_carrier_off(tp->dev);
3626                         }
3627                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3628                         bmcr = new_bmcr;
3629                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3630                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3631                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3632                             ASIC_REV_5714) {
3633                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3634                                         bmsr |= BMSR_LSTATUS;
3635                                 else
3636                                         bmsr &= ~BMSR_LSTATUS;
3637                         }
3638                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3639                 }
3640         }
3641
3642         if (bmsr & BMSR_LSTATUS) {
3643                 current_speed = SPEED_1000;
3644                 current_link_up = 1;
3645                 if (bmcr & BMCR_FULLDPLX)
3646                         current_duplex = DUPLEX_FULL;
3647                 else
3648                         current_duplex = DUPLEX_HALF;
3649
3650                 local_adv = 0;
3651                 remote_adv = 0;
3652
3653                 if (bmcr & BMCR_ANENABLE) {
3654                         u32 common;
3655
3656                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3657                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3658                         common = local_adv & remote_adv;
3659                         if (common & (ADVERTISE_1000XHALF |
3660                                       ADVERTISE_1000XFULL)) {
3661                                 if (common & ADVERTISE_1000XFULL)
3662                                         current_duplex = DUPLEX_FULL;
3663                                 else
3664                                         current_duplex = DUPLEX_HALF;
3665                         }
3666                         else
3667                                 current_link_up = 0;
3668                 }
3669         }
3670
3671         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3672                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3673
3674         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3675         if (tp->link_config.active_duplex == DUPLEX_HALF)
3676                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3677
3678         tw32_f(MAC_MODE, tp->mac_mode);
3679         udelay(40);
3680
3681         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3682
3683         tp->link_config.active_speed = current_speed;
3684         tp->link_config.active_duplex = current_duplex;
3685
3686         if (current_link_up != netif_carrier_ok(tp->dev)) {
3687                 if (current_link_up)
3688                         netif_carrier_on(tp->dev);
3689                 else {
3690                         netif_carrier_off(tp->dev);
3691                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3692                 }
3693                 tg3_link_report(tp);
3694         }
3695         return err;
3696 }
3697
3698 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3699 {
3700         if (tp->serdes_counter) {
3701                 /* Give autoneg time to complete. */
3702                 tp->serdes_counter--;
3703                 return;
3704         }
3705         if (!netif_carrier_ok(tp->dev) &&
3706             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3707                 u32 bmcr;
3708
3709                 tg3_readphy(tp, MII_BMCR, &bmcr);
3710                 if (bmcr & BMCR_ANENABLE) {
3711                         u32 phy1, phy2;
3712
3713                         /* Select shadow register 0x1f */
3714                         tg3_writephy(tp, 0x1c, 0x7c00);
3715                         tg3_readphy(tp, 0x1c, &phy1);
3716
3717                         /* Select expansion interrupt status register */
3718                         tg3_writephy(tp, 0x17, 0x0f01);
3719                         tg3_readphy(tp, 0x15, &phy2);
3720                         tg3_readphy(tp, 0x15, &phy2);
3721
3722                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3723                                 /* We have signal detect and not receiving
3724                                  * config code words, link is up by parallel
3725                                  * detection.
3726                                  */
3727
3728                                 bmcr &= ~BMCR_ANENABLE;
3729                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3730                                 tg3_writephy(tp, MII_BMCR, bmcr);
3731                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3732                         }
3733                 }
3734         }
3735         else if (netif_carrier_ok(tp->dev) &&
3736                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3737                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3738                 u32 phy2;
3739
3740                 /* Select expansion interrupt status register */
3741                 tg3_writephy(tp, 0x17, 0x0f01);
3742                 tg3_readphy(tp, 0x15, &phy2);
3743                 if (phy2 & 0x20) {
3744                         u32 bmcr;
3745
3746                         /* Config code words received, turn on autoneg. */
3747                         tg3_readphy(tp, MII_BMCR, &bmcr);
3748                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3749
3750                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3751
3752                 }
3753         }
3754 }
3755
3756 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3757 {
3758         int err;
3759
3760         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3761                 err = tg3_setup_fiber_phy(tp, force_reset);
3762         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3763                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3764         } else {
3765                 err = tg3_setup_copper_phy(tp, force_reset);
3766         }
3767
3768         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3769             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3770                 u32 val, scale;
3771
3772                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3773                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3774                         scale = 65;
3775                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3776                         scale = 6;
3777                 else
3778                         scale = 12;
3779
3780                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3781                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3782                 tw32(GRC_MISC_CFG, val);
3783         }
3784
3785         if (tp->link_config.active_speed == SPEED_1000 &&
3786             tp->link_config.active_duplex == DUPLEX_HALF)
3787                 tw32(MAC_TX_LENGTHS,
3788                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3789                       (6 << TX_LENGTHS_IPG_SHIFT) |
3790                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3791         else
3792                 tw32(MAC_TX_LENGTHS,
3793                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3794                       (6 << TX_LENGTHS_IPG_SHIFT) |
3795                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3796
3797         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3798                 if (netif_carrier_ok(tp->dev)) {
3799                         tw32(HOSTCC_STAT_COAL_TICKS,
3800                              tp->coal.stats_block_coalesce_usecs);
3801                 } else {
3802                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3803                 }
3804         }
3805
3806         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3807                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3808                 if (!netif_carrier_ok(tp->dev))
3809                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3810                               tp->pwrmgmt_thresh;
3811                 else
3812                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3813                 tw32(PCIE_PWR_MGMT_THRESH, val);
3814         }
3815
3816         return err;
3817 }
3818
3819 /* This is called whenever we suspect that the system chipset is re-
3820  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3821  * is bogus tx completions. We try to recover by setting the
3822  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3823  * in the workqueue.
3824  */
static void tg3_tx_recover(struct tg3 *tp)
{
        /* Recovery is only meaningful if the reorder workaround is not
         * already active and we are not using indirect mailbox writes.
         */
        BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
               "mapped I/O cycles to the network device, attempting to "
               "recover. Please report the problem to the driver maintainer "
               "and include system chipset information.\n", tp->dev->name);

        /* Flag the pending recovery; the workqueue performs the actual
         * chip reset later (see the header comment above).
         */
        spin_lock(&tp->lock);
        tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
        spin_unlock(&tp->lock);
}
3839
3840 static inline u32 tg3_tx_avail(struct tg3 *tp)
3841 {
3842         smp_mb();
3843         return (tp->tx_pending -
3844                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3845 }
3846
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reap completed tx descriptors between our software consumer index
 * and the hardware consumer index from the status block, freeing the
 * SKBs and waking the netif queue when enough space opens up.
 */
static void tg3_tx(struct tg3 *tp)
{
        u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tp->tx_cons;

        while (sw_idx != hw_idx) {
                struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;

                /* A completed slot with no SKB means the completion is
                 * bogus (likely MMIO write re-ordering) -- recover.
                 */
                if (unlikely(skb == NULL)) {
                        tg3_tx_recover(tp);
                        return;
                }

                skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

                ri->skb = NULL;

                sw_idx = NEXT_TX(sw_idx);

                /* Skip over the fragment slots; only the first slot of
                 * a packet carries the SKB pointer.  Any SKB found here,
                 * or running past hw_idx, indicates corruption.
                 */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        ri = &tp->tx_buffers[sw_idx];
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;
                        sw_idx = NEXT_TX(sw_idx);
                }

                dev_kfree_skb(skb);

                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
                        return;
                }
        }

        tp->tx_cons = sw_idx;

        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(tp->dev) &&
                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
                netif_tx_lock(tp->dev);
                /* Re-check under the tx lock to close the race with a
                 * concurrent tg3_start_xmit() stopping the queue.
                 */
                if (netif_queue_stopped(tp->dev) &&
                    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
                        netif_wake_queue(tp->dev);
                netif_tx_unlock(tp->dev);
        }
}
3905
3906 /* Returns size of skb allocated or < 0 on error.
3907  *
3908  * We only need to fill in the address because the other members
3909  * of the RX descriptor are invariant, see tg3_init_rings.
3910  *
3911  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3912  * posting buffers we only dirty the first cache line of the RX
3913  * descriptor (containing the address).  Whereas for the RX status
3914  * buffers the cpu only reads the last cacheline of the RX descriptor
3915  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3916  */
3917 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3918                             int src_idx, u32 dest_idx_unmasked)
3919 {
3920         struct tg3_rx_buffer_desc *desc;
3921         struct ring_info *map, *src_map;
3922         struct sk_buff *skb;
3923         dma_addr_t mapping;
3924         int skb_size, dest_idx;
3925
3926         src_map = NULL;
3927         switch (opaque_key) {
3928         case RXD_OPAQUE_RING_STD:
3929                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3930                 desc = &tp->rx_std[dest_idx];
3931                 map = &tp->rx_std_buffers[dest_idx];
3932                 if (src_idx >= 0)
3933                         src_map = &tp->rx_std_buffers[src_idx];
3934                 skb_size = tp->rx_pkt_buf_sz;
3935                 break;
3936
3937         case RXD_OPAQUE_RING_JUMBO:
3938                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3939                 desc = &tp->rx_jumbo[dest_idx];
3940                 map = &tp->rx_jumbo_buffers[dest_idx];
3941                 if (src_idx >= 0)
3942                         src_map = &tp->rx_jumbo_buffers[src_idx];
3943                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3944                 break;
3945
3946         default:
3947                 return -EINVAL;
3948         }
3949
3950         /* Do not overwrite any of the map or rp information
3951          * until we are sure we can commit to a new buffer.
3952          *
3953          * Callers depend upon this behavior and assume that
3954          * we leave everything unchanged if we fail.
3955          */
3956         skb = netdev_alloc_skb(tp->dev, skb_size);
3957         if (skb == NULL)
3958                 return -ENOMEM;
3959
3960         skb_reserve(skb, tp->rx_offset);
3961
3962         mapping = pci_map_single(tp->pdev, skb->data,
3963                                  skb_size - tp->rx_offset,
3964                                  PCI_DMA_FROMDEVICE);
3965
3966         map->skb = skb;
3967         pci_unmap_addr_set(map, mapping, mapping);
3968
3969         if (src_map != NULL)
3970                 src_map->skb = NULL;
3971
3972         desc->addr_hi = ((u64)mapping >> 32);
3973         desc->addr_lo = ((u64)mapping & 0xffffffff);
3974
3975         return skb_size;
3976 }
3977
3978 /* We only need to move over in the address because the other
3979  * members of the RX descriptor are invariant.  See notes above
3980  * tg3_alloc_rx_skb for full details.
3981  */
3982 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3983                            int src_idx, u32 dest_idx_unmasked)
3984 {
3985         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3986         struct ring_info *src_map, *dest_map;
3987         int dest_idx;
3988
3989         switch (opaque_key) {
3990         case RXD_OPAQUE_RING_STD:
3991                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3992                 dest_desc = &tp->rx_std[dest_idx];
3993                 dest_map = &tp->rx_std_buffers[dest_idx];
3994                 src_desc = &tp->rx_std[src_idx];
3995                 src_map = &tp->rx_std_buffers[src_idx];
3996                 break;
3997
3998         case RXD_OPAQUE_RING_JUMBO:
3999                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4000                 dest_desc = &tp->rx_jumbo[dest_idx];
4001                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4002                 src_desc = &tp->rx_jumbo[src_idx];
4003                 src_map = &tp->rx_jumbo_buffers[src_idx];
4004                 break;
4005
4006         default:
4007                 return;
4008         }
4009
4010         dest_map->skb = src_map->skb;
4011         pci_unmap_addr_set(dest_map, mapping,
4012                            pci_unmap_addr(src_map, mapping));
4013         dest_desc->addr_hi = src_desc->addr_hi;
4014         dest_desc->addr_lo = src_desc->addr_lo;
4015
4016         src_map->skb = NULL;
4017 }
4018
4019 #if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged frame to the stack via the VLAN accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
        return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
4024 #endif
4025
4026 /* The RX ring scheme is composed of multiple rings which post fresh
4027  * buffers to the chip, and one special ring the chip uses to report
4028  * status back to the host.
4029  *
4030  * The special ring reports the status of received packets to the
4031  * host.  The chip does not write into the original descriptor the
4032  * RX buffer was obtained from.  The chip simply takes the original
4033  * descriptor as provided by the host, updates the status and length
4034  * field, then writes this into the next status ring entry.
4035  *
4036  * Each ring the host uses to post buffers to the chip is described
4037  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4038  * it is first placed into the on-chip ram.  When the packet's length
4039  * is known, it walks down the TG3_BDINFO entries to select the ring.
4040  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4041  * which is within the range of the new packet's length is chosen.
4042  *
4043  * The "separate ring for rx status" scheme may sound queer, but it makes
4044  * sense from a cache coherency perspective.  If only the host writes
4045  * to the buffer post rings, and only the chip writes to the rx status
4046  * rings, then cache lines never move beyond shared-modified state.
4047  * If both the host and chip were to write into the same ring, cache line
4048  * eviction could occur since both entities want it in an exclusive state.
4049  */
/* Receive up to @budget packets from the status return ring, posting
 * replacement buffers back to the chip.  Returns the number of packets
 * delivered to the stack.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
        u32 work_mask, rx_std_posted = 0;
        u32 sw_idx = tp->rx_rcb_ptr;
        u16 hw_idx;
        int received;

        hw_idx = tp->hw_status->idx[0].rx_producer;
        /*
         * We need to order the read of hw_idx and the read of
         * the opaque cookie.
         */
        rmb();
        work_mask = 0;
        received = 0;
        while (sw_idx != hw_idx && budget > 0) {
                struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;

                /* The opaque cookie identifies which posting ring
                 * (std or jumbo) and which slot the buffer came from.
                 */
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_std_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_std_ptr;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_jumbo_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_jumbo_ptr;
                }
                else {
                        goto next_pkt_nopost;
                }

                work_mask |= opaque_key;

                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
                        tp->net_stats.rx_dropped++;
                        goto next_pkt;
                }

                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

                if (len > RX_COPY_THRESHOLD
                        && tp->rx_offset == 2
                        /* rx_offset != 2 iff this is a 5701 card running
                         * in PCI-X mode [see tg3_get_invariants()] */
                ) {
                        int skb_size;

                        /* Large packet: allocate a fresh replacement
                         * buffer and hand the original up unchanged.
                         */
                        skb_size = tg3_alloc_rx_skb(tp, opaque_key,
                                                    desc_idx, *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;

                        pci_unmap_single(tp->pdev, dma_addr,
                                         skb_size - tp->rx_offset,
                                         PCI_DMA_FROMDEVICE);

                        skb_put(skb, len);
                } else {
                        struct sk_buff *copy_skb;

                        /* Small packet: copy it into a new SKB and
                         * recycle the original ring buffer.
                         */
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);

                        copy_skb = netdev_alloc_skb(tp->dev, len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        skb_copy_from_linear_data(skb, copy_skb->data, len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
                }

                if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
                if (tp->vlgrp != NULL &&
                    desc->type_flags & RXD_FLAG_VLAN) {
                        tg3_vlan_rx(tp, skb,
                                    desc->err_vlan & RXD_VLAN_MASK);
                } else
#endif
                        netif_receive_skb(skb);

                tp->dev->last_rx = jiffies;
                received++;
                budget--;

next_pkt:
                (*post_ptr)++;

                /* Periodically post std buffers back to the chip so it
                 * never runs dry within one large batch.
                 */
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
                        u32 idx = *post_ptr % TG3_RX_RING_SIZE;

                        tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
                                     TG3_64BIT_REG_LOW, idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
next_pkt_nopost:
                sw_idx++;
                sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

                /* Refresh hw_idx to see if there is new work */
                if (sw_idx == hw_idx) {
                        hw_idx = tp->hw_status->idx[0].rx_producer;
                        rmb();
                }
        }

        /* ACK the status ring. */
        tp->rx_rcb_ptr = sw_idx;
        tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

        /* Refill RX ring(s). */
        if (work_mask & RXD_OPAQUE_RING_STD) {
                sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        if (work_mask & RXD_OPAQUE_RING_JUMBO) {
                sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        mmiowb();

        return received;
}
4205
/* One pass of NAPI work: handle link-change events, reap tx
 * completions, then receive packets within the remaining budget.
 * Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
        struct tg3_hw_status *sblk = tp->hw_status;

        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        sblk->status = SD_STATUS_UPDATED |
                                (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                                /* phylib manages the link; just ack the
                                 * MAC status bits here.
                                 */
                                tw32_f(MAC_STATUS,
                                     (MAC_STATUS_SYNC_CHANGED |
                                      MAC_STATUS_CFG_CHANGED |
                                      MAC_STATUS_MI_COMPLETION |
                                      MAC_STATUS_LNKSTATE_CHANGED));
                                udelay(40);
                        } else
                                tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }

        /* run TX completion thread */
        if (sblk->idx[0].tx_consumer != tp->tx_cons) {
                tg3_tx(tp);
                /* Bail out early if tg3_tx() flagged corruption; the
                 * caller will schedule the reset task.
                 */
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        return work_done;
        }

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_done += tg3_rx(tp, budget - work_done);

        return work_done;
}
4247
/* NAPI poll callback.  Loops over tg3_poll_work() until the budget is
 * exhausted or no work remains, then completes NAPI and re-enables
 * chip interrupts via tg3_restart_ints().
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3 *tp = container_of(napi, struct tg3, napi);
        int work_done = 0;
        struct tg3_hw_status *sblk = tp->hw_status;

        while (1) {
                work_done = tg3_poll_work(tp, work_done, budget);

                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
                        /* tp->last_tag is used in tg3_restart_ints() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tp->last_tag = sblk->status_tag;
                        rmb();
                } else
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tp))) {
                        netif_rx_complete(tp->dev, napi);
                        tg3_restart_ints(tp);
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        netif_rx_complete(tp->dev, napi);
        /* Let the reset workqueue recover from bogus tx completions. */
        schedule_work(&tp->reset_task);
        return work_done;
}
4288
/* Prevent the ISRs from scheduling NAPI (they test tg3_irq_sync())
 * and wait for any handler already in flight to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Publish irq_sync before waiting on a running handler. */
        smp_mb();

        synchronize_irq(tp->pdev->irq);
}
4298
/* Nonzero while interrupt processing is quiesced; see tg3_irq_quiesce(). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}
4303
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 *
 * Paired with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
4315
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
4320
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        /* Warm the cache lines the NAPI poll will touch first. */
        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

        /* Skip scheduling NAPI while interrupts are being quiesced. */
        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(dev, &tp->napi);

        return IRQ_HANDLED;
}
4337
4338 /* MSI ISR - No need to check for interrupt sharing and no need to
4339  * flush status block and interrupt mailbox. PCI ordering rules
4340  * guarantee that MSI will arrive after the status block.
4341  */
4342 static irqreturn_t tg3_msi(int irq, void *dev_id)
4343 {
4344         struct net_device *dev = dev_id;
4345         struct tg3 *tp = netdev_priv(dev);
4346
4347         prefetch(tp->hw_status);
4348         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4349         /*
4350          * Writing any value to intr-mbox-0 clears PCI INTA# and
4351          * chip-internal interrupt pending events.
4352          * Writing non-zero to intr-mbox-0 additional tells the
4353          * NIC to stop sending us irqs, engaging "in-intr-handler"
4354          * event coalescing.
4355          */
4356         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4357         if (likely(!tg3_irq_sync(tp)))
4358                 netif_rx_schedule(dev, &tp->napi);
4359
4360         return IRQ_RETVAL(1);
4361 }
4362
/* Legacy INTx ISR for chips without tagged status.  The line may be
 * shared, so we must verify the interrupt is really ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tp))) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                netif_rx_schedule(dev, &tp->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
4411
/* Legacy INTx ISR for chips using tagged status blocks.  A status tag
 * equal to tp->last_tag means nothing new has happened, so a shared
 * line interrupt is probably not ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tp->last_tag)) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        if (netif_rx_schedule_prep(dev, &tp->napi)) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                /* Update last_tag to mark that this status has been
                 * seen. Because interrupt may be shared, we may be
                 * racing with tg3_poll(), so only update last_tag
                 * if tg3_poll() is not scheduled.
                 */
                tp->last_tag = sblk->status_tag;
                __netif_rx_schedule(dev, &tp->napi);
        }
out:
        return IRQ_RETVAL(handled);
}
4459
4460 /* ISR for interrupt test */
4461 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4462 {
4463         struct net_device *dev = dev_id;
4464         struct tg3 *tp = netdev_priv(dev);
4465         struct tg3_hw_status *sblk = tp->hw_status;
4466
4467         if ((sblk->status & SD_STATUS_UPDATED) ||
4468             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4469                 tg3_disable_ints(tp);
4470                 return IRQ_RETVAL(1);
4471         }
4472         return IRQ_RETVAL(0);
4473 }
4474
4475 static int tg3_init_hw(struct tg3 *, int);
4476 static int tg3_halt(struct tg3 *, int, int);
4477
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the device is halted and closed;
 * note that tp->lock is dropped and re-acquired around dev_close()
 * (hence the sparse __releases/__acquires annotations).
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
                       "aborting.\n", tp->dev->name);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                tp->irq_sync = 0;
                napi_enable(&tp->napi);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
4501
4502 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: fake an interrupt to drive rx/tx processing. */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_interrupt(tp->pdev->irq, dev);
}
4509 #endif
4510
/* Workqueue handler that fully halts and re-initializes the chip,
 * e.g. after a tx timeout or detected tx-completion corruption.
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        int err;
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);

        if (!netif_running(tp->dev)) {
                tg3_full_unlock(tp);
                return;
        }

        tg3_full_unlock(tp);

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        /* Re-take the lock with IRQ quiescing for the actual reset. */
        tg3_full_lock(tp, 1);

        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

        if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
                /* Apply the MMIO write re-ordering workaround flagged by
                 * tg3_tx_recover(): switch to flushing mailbox writes.
                 */
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
                tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        err = tg3_init_hw(tp, 1);
        if (err)
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);
}
4558
/* Dump a few MAC/DMA status registers to aid tx-timeout debugging. */
static void tg3_dump_short_state(struct tg3 *tp)
{
        printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
        printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
               tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4566
/* net_device tx-timeout hook: log chip state and schedule a full reset. */
static void tg3_tx_timeout(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_msg_tx_err(tp)) {
                printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
                       dev->name);
                tg3_dump_short_state(tp);
        }

        schedule_work(&tp->reset_task);
}
4579
4580 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4581 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4582 {
4583         u32 base = (u32) mapping & 0xffffffff;
4584
4585         return ((base > 0xffffdcc0) &&
4586                 (base + len + 8 < base));
4587 }
4588
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
                                          int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
        /* Only chips with the 40-bit DMA bug need the check, and only
         * 64-bit highmem configs can produce such addresses.
         */
        if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
                return (((u64) mapping + len) > DMA_40BIT_MASK);
        return 0;
#else
        return 0;
#endif
}
4601
4602 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4603
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearizes the SKB into a freshly-allocated copy that avoids the
 * problematic boundary, rewrites the tx descriptor, and frees the
 * original.  Returns 0 on success, -1 if the copy could not be
 * allocated/mapped or still crosses a 4GB boundary (packet dropped).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                                       u32 last_plus_one, u32 *start,
                                       u32 base_flags, u32 mss)
{
        struct sk_buff *new_skb;
        dma_addr_t new_addr = 0;
        u32 entry = *start;
        int i, ret = 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                /* 5701 needs extra headroom so the data can be realigned. */
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                entry = *start;
                ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
                new_addr = skb_shinfo(new_skb)->dma_maps[0];

                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
                if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        if (!ret)
                                skb_dma_unmap(&tp->pdev->dev, new_skb,
                                              DMA_TO_DEVICE);
                        ret = -1;
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
                } else {
                        tg3_set_txd(tp, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                        *start = NEXT_TX(entry);
                }
        }

        /* Now clean up the sw ring entries. */
        i = 0;
        while (entry != last_plus_one) {
                /* Slot 0 takes the (possibly NULL) replacement SKB;
                 * the remaining old fragment slots are cleared.
                 */
                if (i == 0) {
                        tp->tx_buffers[entry].skb = new_skb;
                } else {
                        tp->tx_buffers[entry].skb = NULL;
                }
                entry = NEXT_TX(entry);
                i++;
        }

        skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
        dev_kfree_skb(skb);

        return ret;
}
4666
4667 static void tg3_set_txd(struct tg3 *tp, int entry,
4668                         dma_addr_t mapping, int len, u32 flags,
4669                         u32 mss_and_is_end)
4670 {
4671         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4672         int is_end = (mss_and_is_end & 0x1);
4673         u32 mss = (mss_and_is_end >> 1);
4674         u32 vlan_tag = 0;
4675
4676         if (is_end)
4677                 flags |= TXD_FLAG_END;
4678         if (flags & TXD_FLAG_VLAN) {
4679                 vlan_tag = flags >> 16;
4680                 flags &= 0xffff;
4681         }
4682         vlan_tag |= (mss << TXD_MSS_SHIFT);
4683
4684         txd->addr_hi = ((u64) mapping >> 32);
4685         txd->addr_lo = ((u64) mapping & 0xffffffff);
4686         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4687         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4688 }
4689
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the skb (head + page fragments), writes one TX descriptor per
 * DMA segment, then kicks the NIC by writing the new producer index to
 * the send-host mailbox.  Returns NETDEV_TX_OK (the skb is consumed or
 * dropped) or NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        struct skb_shared_info *sp;
        dma_addr_t mapping;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        /* Non-zero gso_size means this is a TSO frame. */
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* Headers are modified below, so we need a private copy. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                /* The header length is packed into bits 9+ of the mss word
                 * handed to the descriptor (via tg3_set_txd's mss field).
                 */
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Hardware recomputes the IP checksum; tot_len is
                         * preset to the per-segment value.
                         */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* TCP checksum is generated by the TSO engine. */
                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        /* VLAN tag travels in the upper 16 bits of base_flags. */
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        sp = skb_shinfo(skb);

        /* dma_maps[0] is the head mapping; [1..] are the fragments. */
        mapping = sp->dma_maps[0];

        /* Only the first slot of a packet owns the skb pointer. */
        tp->tx_buffers[entry].skb = skb;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = sp->dma_maps[i + 1];
                        tp->tx_buffers[entry].skb = NULL;

                        /* Last fragment sets the END bit. */
                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                /* Stop the queue before the worst-case packet no longer
                 * fits; re-wake if reclaim raced and freed enough slots.
                 */
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4809
4810 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4811
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * The oversized TSO skb is segmented in software and each resulting
 * MTU-sized segment is pushed through tg3_start_xmit_dma_bug().
 * The original skb is always consumed.  Returns NETDEV_TX_OK or
 * NETDEV_TX_BUSY if the ring cannot hold the estimated segments.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
                netif_stop_queue(tp->dev);
                /* Re-check after stopping: if reclaim freed space in the
                 * meantime, wake the queue and proceed; otherwise punt.
                 */
                if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
                        return NETDEV_TX_BUSY;

                netif_wake_queue(tp->dev);
        }

        /* Segment in software with TSO masked off. */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto tg3_tso_bug_end;

        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                /* Each segment is small enough to avoid the erratum. */
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
4844
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Same overall shape as tg3_start_xmit(), but every DMA segment is
 * checked against the 4GB-boundary and 40-bit address errata; if any
 * segment would trip them, the whole packet is rewritten through
 * tigon3_dma_hwbug_workaround().  Returns NETDEV_TX_OK or
 * NETDEV_TX_BUSY.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        struct skb_shared_info *sp;
        int would_hit_hwbug;
        dma_addr_t mapping;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
        mss = 0;
        /* Non-zero gso_size means this is a TSO frame (IPv4 only here). */
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Headers are modified below, so we need a private copy. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Headers over 80 bytes trip a TSO erratum on some chips;
                 * fall back to software GSO segmentation.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        /* Hardware TSO computes the TCP checksum itself. */
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO wants the pseudo-header checksum
                         * pre-seeded.
                         */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* IP/TCP option lengths are encoded differently depending
                 * on the TSO implementation: in the mss word for HW
                 * TSO/5705, in base_flags otherwise.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }
#if TG3_VLAN_TAG_USED
        /* VLAN tag travels in the upper 16 bits of base_flags. */
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        sp = skb_shinfo(skb);

        mapping = sp->dma_maps[0];

        tp->tx_buffers[entry].skb = skb;

        would_hit_hwbug = 0;

        /* Head segment checks: 5701 DMA bug forces the workaround path
         * unconditionally; otherwise test for a 4GB boundary crossing.
         */
        if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
                would_hit_hwbug = 1;
        else if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = sp->dma_maps[i + 1];

                        tp->tx_buffers[entry].skb = NULL;

                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the packet's first descriptor slot. */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
5022
5023 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5024                                int new_mtu)
5025 {
5026         dev->mtu = new_mtu;
5027
5028         if (new_mtu > ETH_DATA_LEN) {
5029                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5030                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5031                         ethtool_op_set_tso(dev, 0);
5032                 }
5033                 else
5034                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5035         } else {
5036                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5037                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5038                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5039         }
5040 }
5041
/* net_device change_mtu hook.
 *
 * Validates the requested MTU against chip limits.  If the interface is
 * down, only the bookkeeping is updated; otherwise the chip is halted,
 * reconfigured for the new MTU, and restarted.  Returns 0 or a negative
 * errno from validation/restart.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        /* Quiesce PHY polling and the data path before resetting. */
        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        /* PHY restart must happen outside the full lock. */
        if (!err)
                tg3_phy_start(tp);

        return err;
}
5080
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Standard rx ring: unmap and free every posted buffer. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Jumbo rx ring: same, with the jumbo buffer size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Tx ring: only the first slot of each packet holds the skb;
         * advance by nr_frags+1 to skip its fragment slots.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

                txp->skb = NULL;

                i += skb_shinfo(skb)->nr_frags + 1;

                dev_kfree_skb_any(skb);
        }
}
5140
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even one rx buffer could be
 * allocated.  Partial allocations shrink the pending counts instead of
 * failing.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips use oversized std buffers instead of a
         * separate jumbo ring when running with a jumbo MTU.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                /* NOTE(review): the extra 64 bytes subtracted here look
                 * like reserved tail space — confirm against chip docs.
                 */
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        /* Run with however many buffers we did get. */
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        /* Unlike the std ring, release the
                                         * already-allocated buffers here.
                                         */
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
5230
/*
 * Free the bookkeeping arrays and all DMA-consistent ring/status/stats
 * memory allocated by tg3_alloc_consistent().  Safe to call on a
 * partially-allocated state (each pointer is checked and NULLed).
 *
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
        /* rx_jumbo_buffers and tx_buffers point into this one allocation
         * (see tg3_alloc_consistent), so this kfree releases all three.
         */
        kfree(tp->rx_std_buffers);
        tp->rx_std_buffers = NULL;
        if (tp->rx_std) {
                pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                    tp->rx_std, tp->rx_std_mapping);
                tp->rx_std = NULL;
        }
        if (tp->rx_jumbo) {
                pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                    tp->rx_jumbo, tp->rx_jumbo_mapping);
                tp->rx_jumbo = NULL;
        }
        if (tp->rx_rcb) {
                pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                    tp->rx_rcb, tp->rx_rcb_mapping);
                tp->rx_rcb = NULL;
        }
        if (tp->tx_ring) {
                pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
                        tp->tx_ring, tp->tx_desc_mapping);
                tp->tx_ring = NULL;
        }
        if (tp->hw_status) {
                pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
                                    tp->hw_status, tp->status_mapping);
                tp->hw_status = NULL;
        }
        if (tp->hw_stats) {
                pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
                                    tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
}
5270
/*
 * Allocate the ring bookkeeping arrays (one kzalloc carved into std rx,
 * jumbo rx, and tx info arrays) plus all DMA-consistent memory: rx
 * rings, rx return ring, tx ring, hardware status block, and hardware
 * statistics block.  On any failure everything already allocated is
 * released via tg3_free_consistent().
 *
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
                                      (TG3_RX_RING_SIZE +
                                       TG3_RX_JUMBO_RING_SIZE)) +
                                     (sizeof(struct tx_ring_info) *
                                      TG3_TX_RING_SIZE),
                                     GFP_KERNEL);
        if (!tp->rx_std_buffers)
                return -ENOMEM;

        /* Carve the jumbo and tx info arrays out of the same block. */
        tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
        tp->tx_buffers = (struct tx_ring_info *)
                &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

        tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                          &tp->rx_std_mapping);
        if (!tp->rx_std)
                goto err_out;

        tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                            &tp->rx_jumbo_mapping);

        if (!tp->rx_jumbo)
                goto err_out;

        tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                          &tp->rx_rcb_mapping);
        if (!tp->rx_rcb)
                goto err_out;

        tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
                                           &tp->tx_desc_mapping);
        if (!tp->tx_ring)
                goto err_out;

        tp->hw_status = pci_alloc_consistent(tp->pdev,
                                             TG3_HW_STATUS_SIZE,
                                             &tp->status_mapping);
        if (!tp->hw_status)
                goto err_out;

        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
                                            &tp->stats_mapping);
        if (!tp->hw_stats)
                goto err_out;

        /* The hardware reads/writes these; start from a clean slate. */
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
5332
5333 #define MAX_WAIT_CNT 1000
5334
5335 /* To stop a block, clear the enable bit and poll till it
5336  * clears.  tp->lock is held.
5337  */
5338 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5339 {
5340         unsigned int i;
5341         u32 val;
5342
5343         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5344                 switch (ofs) {
5345                 case RCVLSC_MODE:
5346                 case DMAC_MODE:
5347                 case MBFREE_MODE:
5348                 case BUFMGR_MODE:
5349                 case MEMARB_MODE:
5350                         /* We can't enable/disable these bits of the
5351                          * 5705/5750, just say success.
5352                          */
5353                         return 0;
5354
5355                 default:
5356                         break;
5357                 }
5358         }
5359
5360         val = tr32(ofs);
5361         val &= ~enable_bit;
5362         tw32_f(ofs, val);
5363
5364         for (i = 0; i < MAX_WAIT_CNT; i++) {
5365                 udelay(100);
5366                 val = tr32(ofs);
5367                 if ((val & enable_bit) == 0)
5368                         break;
5369         }
5370
5371         if (i == MAX_WAIT_CNT && !silent) {
5372                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5373                        "ofs=%lx enable_bit=%x\n",
5374                        ofs, enable_bit);
5375                 return -ENODEV;
5376         }
5377
5378         return 0;
5379 }
5380
/* Stop all hardware activity in a controlled order: receive path
 * first, then transmit path, then host coalescing and memory blocks,
 * finishing with an FTQ reset and a wipe of the status/stats blocks.
 * Individual stop failures are accumulated rather than aborting early.
 * tp->lock is held.
 *
 * Returns 0 on success or a negative value if any block failed to stop.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop accepting new frames before tearing down the rx blocks. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* TX mode has no tg3_stop_block() entry; poll it by hand. */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the flow-through queue reset. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Clear shared-memory blocks so stale state is not read back. */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
5443
5444 /* tp->lock is held. */
5445 static int tg3_nvram_lock(struct tg3 *tp)
5446 {
5447         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5448                 int i;
5449
5450                 if (tp->nvram_lock_cnt == 0) {
5451                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5452                         for (i = 0; i < 8000; i++) {
5453                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5454                                         break;
5455                                 udelay(20);
5456                         }
5457                         if (i == 8000) {
5458                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5459                                 return -ENODEV;
5460                         }
5461                 }
5462                 tp->nvram_lock_cnt++;
5463         }
5464         return 0;
5465 }
5466
5467 /* tp->lock is held. */
5468 static void tg3_nvram_unlock(struct tg3 *tp)
5469 {
5470         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5471                 if (tp->nvram_lock_cnt > 0)
5472                         tp->nvram_lock_cnt--;
5473                 if (tp->nvram_lock_cnt == 0)
5474                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5475         }
5476 }
5477
5478 /* tp->lock is held. */
5479 static void tg3_enable_nvram_access(struct tg3 *tp)
5480 {
5481         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5482             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5483                 u32 nvaccess = tr32(NVRAM_ACCESS);
5484
5485                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5486         }
5487 }
5488
5489 /* tp->lock is held. */
5490 static void tg3_disable_nvram_access(struct tg3 *tp)
5491 {
5492         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5493             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5494                 u32 nvaccess = tr32(NVRAM_ACCESS);
5495
5496                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5497         }
5498 }
5499
/* Post an event to the APE (Application Processing Engine) firmware.
 * Silently returns if the APE shared-memory signature is absent or the
 * APE firmware is not ready.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* Bail out unless the APE shared-memory signature is present. */
        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                /* Queue our event only once the previous one has been
                 * consumed (EVENT_PENDING clear); the write must happen
                 * while the APE memory lock is held.
                 */
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        /* If the event was queued above, ring the APE doorbell. */
        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5535
5536 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5537 {
5538         u32 event;
5539         u32 apedata;
5540
5541         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5542                 return;
5543
5544         switch (kind) {
5545                 case RESET_KIND_INIT:
5546                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5547                                         APE_HOST_SEG_SIG_MAGIC);
5548                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5549                                         APE_HOST_SEG_LEN_MAGIC);
5550                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5551                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5552                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5553                                         APE_HOST_DRIVER_ID_MAGIC);
5554                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5555                                         APE_HOST_BEHAV_NO_PHYLOCK);
5556
5557                         event = APE_EVENT_STATUS_STATE_START;
5558                         break;
5559                 case RESET_KIND_SHUTDOWN:
5560                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5561                         break;
5562                 case RESET_KIND_SUSPEND:
5563                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5564                         break;
5565                 default:
5566                         return;
5567         }
5568
5569         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5570
5571         tg3_ape_send_event(tp, event);
5572 }
5573
5574 /* tp->lock is held. */
5575 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5576 {
5577         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5578                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5579
5580         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5581                 switch (kind) {
5582                 case RESET_KIND_INIT:
5583                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5584                                       DRV_STATE_START);
5585                         break;
5586
5587                 case RESET_KIND_SHUTDOWN:
5588                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5589                                       DRV_STATE_UNLOAD);
5590                         break;
5591
5592                 case RESET_KIND_SUSPEND:
5593                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5594                                       DRV_STATE_SUSPEND);
5595                         break;
5596
5597                 default:
5598                         break;
5599                 }
5600         }
5601
5602         if (kind == RESET_KIND_INIT ||
5603             kind == RESET_KIND_SUSPEND)
5604                 tg3_ape_driver_state_change(tp, kind);
5605 }
5606
5607 /* tp->lock is held. */
5608 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5609 {
5610         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5611                 switch (kind) {
5612                 case RESET_KIND_INIT:
5613                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5614                                       DRV_STATE_START_DONE);
5615                         break;
5616
5617                 case RESET_KIND_SHUTDOWN:
5618                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5619                                       DRV_STATE_UNLOAD_DONE);
5620                         break;
5621
5622                 default:
5623                         break;
5624                 }
5625         }
5626
5627         if (kind == RESET_KIND_SHUTDOWN)
5628                 tg3_ape_driver_state_change(tp, kind);
5629 }
5630
5631 /* tp->lock is held. */
5632 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5633 {
5634         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5635                 switch (kind) {
5636                 case RESET_KIND_INIT:
5637                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5638                                       DRV_STATE_START);
5639                         break;
5640
5641                 case RESET_KIND_SHUTDOWN:
5642                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5643                                       DRV_STATE_UNLOAD);
5644                         break;
5645
5646                 case RESET_KIND_SUSPEND:
5647                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5648                                       DRV_STATE_SUSPEND);
5649                         break;
5650
5651                 default:
5652                         break;
5653                 }
5654         }
5655 }
5656
5657 static int tg3_poll_fw(struct tg3 *tp)
5658 {
5659         int i;
5660         u32 val;
5661
5662         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5663                 /* Wait up to 20ms for init done. */
5664                 for (i = 0; i < 200; i++) {
5665                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5666                                 return 0;
5667                         udelay(100);
5668                 }
5669                 return -ENODEV;
5670         }
5671
5672         /* Wait for firmware initialization to complete. */
5673         for (i = 0; i < 100000; i++) {
5674                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5675                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5676                         break;
5677                 udelay(10);
5678         }
5679
5680         /* Chip might not be fitted with firmware.  Some Sun onboard
5681          * parts are configured like that.  So don't signal the timeout
5682          * of the above loop as an error, but do report the lack of
5683          * running firmware once.
5684          */
5685         if (i >= 100000 &&
5686             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5687                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5688
5689                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5690                        tp->dev->name);
5691         }
5692
5693         return 0;
5694 }
5695
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
        /* The GRC core-clock reset clears the memory-enable bit in the
         * PCI command register on some chips (see tg3_chip_reset());
         * stash the register so tg3_restore_pci_state() can restore it.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5701
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Put back the command register saved by tg3_save_pci_state(). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        /* PCIe: restore max read request size; conventional PCI/PCI-X:
         * restore cache line size and latency timer instead.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
                pcie_set_readrq(tp->pdev, 4096);
        else {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tp->pcix_cap) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
5763
5764 static void tg3_stop_fw(struct tg3 *);
5765
/* tp->lock is held.  Perform a full GRC core-clock reset of the chip,
 * restore enough state (PCI config, memory arbiter, MAC port mode) for
 * the driver to continue, wait for firmware, and re-probe the ASF
 * enable state.  Returns 0 on success or a negative errno from
 * tg3_poll_fw().  The exact ordering of register accesses below is
 * load-bearing; do not reorder.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int err;

        tg3_nvram_lock(tp);

        tg3_mdio_stop(tp);

        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
        if (tp->hw_status) {
                tp->hw_status->status = 0;
                tp->hw_status->status_tag = 0;
        }
        tp->last_tag = 0;
        smp_mb();
        synchronize_irq(tp->pdev->irq);

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* NOTE(review): 0x7e2c and bit 29 below are magic PCIe
                 * workaround register/bit values not named in tg3.h —
                 * confirm against the Broadcom programming reference
                 * before changing.
                 */
                if (tr32(0x7e2c) == 0x60) {
                        tw32(0x7e2c, 0x20);
                }
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* 5906: mark driver-initiated reset and un-halt the VCPU. */
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }
                /* Set PCIE max payload size and clear error status.  */
                pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
        }

        tg3_restore_pci_state(tp);

        /* PCI config space is usable again; let the irq handler back in. */
        tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

        val = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Re-establish the MAC port mode matching the PHY/APE config. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
                tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
                if (tp->mac_mode & MAC_MODE_APE_TX_EN)
                        tp->mac_mode |= MAC_MODE_TDE_ENABLE;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else
                tw32_f(MAC_MODE, 0);
        udelay(40);

        tg3_mdio_start(tp);

        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        /* Reprobe ASF enable state.  */
        tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
        tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        tp->last_event_jiffies = jiffies;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
        }

        return 0;
}
5969
5970 /* tp->lock is held. */
5971 static void tg3_stop_fw(struct tg3 *tp)
5972 {
5973         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5974            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5975                 /* Wait for RX cpu to ACK the previous event. */
5976                 tg3_wait_for_event_ack(tp);
5977
5978                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5979
5980                 tg3_generate_fw_event(tp);
5981
5982                 /* Wait for RX cpu to ACK this event. */
5983                 tg3_wait_for_event_ack(tp);
5984         }
5985 }
5986
/* tp->lock is held.  Bring the chip down: pause firmware, signal the
 * pre-reset state, abort the hardware, reset the chip, then report the
 * post-reset state.  Returns the result of tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int err;

        tg3_stop_fw(tp);

        tg3_write_sig_pre_reset(tp, kind);

        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);

        /* Signatures are written even if the reset failed. */
        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        return err;
}
6007
/* Link-map constants for the embedded firmware image below
 * (tg3FwText / tg3FwRodata): version, segment load addresses (in the
 * on-chip CPU's view) and byte lengths.
 * NOTE(review): "RELASE" is a historical misspelling of "RELEASE";
 * left untouched because renaming a macro could break references
 * elsewhere in this file.
 */
#define TG3_FW_RELEASE_MAJOR    0x0
#define TG3_FW_RELASE_MINOR     0x0
#define TG3_FW_RELEASE_FIX      0x0
#define TG3_FW_START_ADDR       0x08000000
#define TG3_FW_TEXT_ADDR        0x08000000
#define TG3_FW_TEXT_LEN         0x9c0
#define TG3_FW_RODATA_ADDR      0x080009c0
#define TG3_FW_RODATA_LEN       0x60
#define TG3_FW_DATA_ADDR        0x08000a40
#define TG3_FW_DATA_LEN         0x20
#define TG3_FW_SBSS_ADDR        0x08000a60
#define TG3_FW_SBSS_LEN         0xc
#define TG3_FW_BSS_ADDR         0x08000a70
#define TG3_FW_BSS_LEN          0x10
6022
/* .text segment of the embedded firmware image: raw instruction words
 * for the on-chip CPU, loaded at TG3_FW_TEXT_ADDR.  Generated data —
 * do not hand-edit.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
        0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
        0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
        0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
        0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
        0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
        0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
        0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
        0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
        0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
        0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
        0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
        0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
        0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
        0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
        0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
        0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
        0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
        0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
        0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
        0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
        0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
        0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
        0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
        0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
        0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
        0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
        0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
        0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
        0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
        0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
        0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
        0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
        0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
        0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
        0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
        0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
        0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
        0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
        0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
        0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
        0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
        0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
        0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
        0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
        0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
        0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
        0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
        0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
        0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
        0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
        0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
        0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
        0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
        0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
        0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
        0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
        0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
        0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
        0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
        0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
        0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
        0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
        0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
6116
/* .rodata segment of the firmware image, loaded at TG3_FW_RODATA_ADDR.
 * The words decode as packed ASCII strings used by the firmware
 * ("5701Aslr", "SwEvt0", "UnknEvnt", "fatalErr", "MainCpuB", ...).
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
        0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
        0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
        0x00000000
};
6124
#if 0 /* All zeros, don't eat up space with it. */
/* .data segment of the firmware image — entirely zero, so it is
 * compiled out; presumably the loader zero-fills that region instead.
 * Kept for reference only; verify the loader before re-enabling.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
6131
/* On-chip scratch memory windows for the RX and TX embedded CPUs
 * (16KB each); used as load targets by tg3_load_firmware_cpu().
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
6136
/* tp->lock is held. */
/* Halt the embedded RISC processor at @offset (RX_CPU_BASE or
 * TX_CPU_BASE) by polling CPU_MODE_HALT up to 10000 times.
 * Returns 0 on success, -ENODEV if the CPU never reports halted.
 * Halting the TX CPU on 5705-class chips is invalid (BUG_ON below).
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
        int i;

        BUG_ON(offset == TX_CPU_BASE &&
            (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val = tr32(GRC_VCPU_EXT_CTRL);

                /* 5906 is halted through its VCPU control register
                 * instead of CPU_MODE.  Note this early return also
                 * skips the NVRAM arbitration clear at the bottom.
                 */
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
                return 0;
        }
        if (offset == RX_CPU_BASE) {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }

                /* RX CPU only: re-assert the halt with a flushed
                 * (posted) write and a short delay.  NOTE(review):
                 * presumably a chip-quirk workaround -- the TX path
                 * below does not need it; confirm against errata.
                 */
                tw32(offset + CPU_STATE, 0xffffffff);
                tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
                udelay(10);
        } else {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }
        }

        /* Loop exhausted without seeing CPU_MODE_HALT latch. */
        if (i >= 10000) {
                printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
                       "and %s CPU\n",
                       tp->dev->name,
                       (offset == RX_CPU_BASE ? "RX" : "TX"));
                return -ENODEV;
        }

        /* Clear firmware's nvram arbitration. */
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
}
6184
/* Describes one firmware image to load into a RISC CPU's scratch
 * memory.  For each section, a NULL data pointer makes the loader
 * write zeros for the whole section (see tg3_load_firmware_cpu).
 */
struct fw_info {
        unsigned int text_base;         /* load address of .text */
        unsigned int text_len;          /* .text length in bytes */
        const u32 *text_data;           /* .text words, or NULL for zeros */
        unsigned int rodata_base;       /* load address of .rodata */
        unsigned int rodata_len;        /* .rodata length in bytes */
        const u32 *rodata_data;         /* .rodata words, or NULL for zeros */
        unsigned int data_base;         /* load address of .data */
        unsigned int data_len;          /* .data length in bytes */
        const u32 *data_data;           /* .data words, or NULL for zeros */
};
6196
6197 /* tp->lock is held. */
6198 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6199                                  int cpu_scratch_size, struct fw_info *info)
6200 {
6201         int err, lock_err, i;
6202         void (*write_op)(struct tg3 *, u32, u32);
6203
6204         if (cpu_base == TX_CPU_BASE &&
6205             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6206                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6207                        "TX cpu firmware on %s which is 5705.\n",
6208                        tp->dev->name);
6209                 return -EINVAL;
6210         }
6211
6212         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6213                 write_op = tg3_write_mem;
6214         else
6215                 write_op = tg3_write_indirect_reg32;
6216
6217         /* It is possible that bootcode is still loading at this point.
6218          * Get the nvram lock first before halting the cpu.
6219          */
6220         lock_err = tg3_nvram_lock(tp);
6221         err = tg3_halt_cpu(tp, cpu_base);
6222         if (!lock_err)
6223                 tg3_nvram_unlock(tp);
6224         if (err)
6225                 goto out;
6226
6227         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6228                 write_op(tp, cpu_scratch_base + i, 0);
6229         tw32(cpu_base + CPU_STATE, 0xffffffff);
6230         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6231         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6232                 write_op(tp, (cpu_scratch_base +
6233                               (info->text_base & 0xffff) +
6234                               (i * sizeof(u32))),
6235                          (info->text_data ?
6236                           info->text_data[i] : 0));
6237         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6238                 write_op(tp, (cpu_scratch_base +
6239                               (info->rodata_base & 0xffff) +
6240                               (i * sizeof(u32))),
6241                          (info->rodata_data ?
6242                           info->rodata_data[i] : 0));
6243         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6244                 write_op(tp, (cpu_scratch_base +
6245                               (info->data_base & 0xffff) +
6246                               (i * sizeof(u32))),
6247                          (info->data_data ?
6248                           info->data_data[i] : 0));
6249
6250         err = 0;
6251
6252 out:
6253         return err;
6254 }
6255
6256 /* tp->lock is held. */
6257 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6258 {
6259         struct fw_info info;
6260         int err, i;
6261
6262         info.text_base = TG3_FW_TEXT_ADDR;
6263         info.text_len = TG3_FW_TEXT_LEN;
6264         info.text_data = &tg3FwText[0];
6265         info.rodata_base = TG3_FW_RODATA_ADDR;
6266         info.rodata_len = TG3_FW_RODATA_LEN;
6267         info.rodata_data = &tg3FwRodata[0];
6268         info.data_base = TG3_FW_DATA_ADDR;
6269         info.data_len = TG3_FW_DATA_LEN;
6270         info.data_data = NULL;
6271
6272         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6273                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6274                                     &info);
6275         if (err)
6276                 return err;
6277
6278         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6279                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6280                                     &info);
6281         if (err)
6282                 return err;
6283
6284         /* Now startup only the RX cpu. */
6285         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6286         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6287
6288         for (i = 0; i < 5; i++) {
6289                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6290                         break;
6291                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6292                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6293                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6294                 udelay(1000);
6295         }
6296         if (i >= 5) {
6297                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6298                        "to set RX CPU PC, is %08x should be %08x\n",
6299                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6300                        TG3_FW_TEXT_ADDR);
6301                 return -ENODEV;
6302         }
6303         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6304         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6305
6306         return 0;
6307 }
6308
6309
/* Memory layout of the TSO firmware image (version 1.6.0). */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6     /* sic: "RELASE" typo kept */
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
6324
6325 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6326         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6327         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6328         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6329         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6330         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6331         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6332         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6333         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6334         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6335         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6336         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6337         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6338         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6339         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6340         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6341         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6342         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6343         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6344         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6345         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6346         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6347         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6348         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6349         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6350         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6351         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6352         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6353         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6354         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6355         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6356         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6357         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6358         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6359         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6360         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6361         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6362         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6363         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6364         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6365         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6366         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6367         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6368         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6369         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6370         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6371         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6372         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6373         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6374         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6375         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6376         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6377         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6378         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6379         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6380         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6381         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6382         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6383         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6384         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6385         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6386         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6387         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6388         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6389         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6390         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6391         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6392         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6393         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6394         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6395         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6396         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6397         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6398         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6399         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6400         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6401         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6402         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6403         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6404         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6405         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6406         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6407         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6408         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6409         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6410         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6411         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6412         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6413         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6414         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6415         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6416         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6417         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6418         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6419         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6420         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6421         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6422         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6423         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6424         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6425         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6426         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6427         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6428         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6429         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6430         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6431         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6432         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6433         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6434         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6435         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6436         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6437         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6438         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6439         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6440         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6441         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6442         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6443         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6444         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6445         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6446         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6447         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6448         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6449         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6450         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6451         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6452         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6453         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6454         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6455         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6456         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6457         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6458         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6459         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6460         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6461         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6462         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6463         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6464         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6465         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6466         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6467         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6468         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6469         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6470         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6471         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6472         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6473         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6474         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6475         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6476         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6477         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6478         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6479         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6480         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6481         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6482         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6483         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6484         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6485         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6486         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6487         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6488         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6489         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6490         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6491         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6492         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6493         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6494         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6495         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6496         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6497         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6498         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6499         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6500         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6501         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6502         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6503         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6504         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6505         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6506         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6507         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6508         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6509         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6510         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6511         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6512         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6513         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6514         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6515         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6516         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6517         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6518         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6519         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6520         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6521         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6522         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6523         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6524         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6525         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6526         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6527         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6528         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6529         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6530         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6531         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6532         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6533         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6534         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6535         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6536         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6537         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6538         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6539         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6540         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6541         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6542         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6543         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6544         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6545         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6546         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6547         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6548         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6549         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6550         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6551         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6552         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6553         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6554         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6555         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6556         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6557         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6558         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6559         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6560         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6561         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6562         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6563         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6564         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6565         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6566         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6567         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6568         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6569         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6570         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6571         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6572         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6573         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6574         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6575         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6576         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6577         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6578         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6579         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6580         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6581         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6582         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6583         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6584         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6585         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6586         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6587         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6588         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6589         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6590         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6591         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6592         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6593         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6594         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6595         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6596         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6597         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6598         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6599         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6600         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6601         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6602         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6603         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6604         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6605         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6606         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6607         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6608         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6609         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6610 };
6611
/* .rodata section of the TSO firmware; word values are mostly
 * ASCII-encoded diagnostic strings used by the on-chip firmware.
 */
static const u32 tg3TsoFwRodata[] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
        0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
        0x00000000,
};
6619
/* Initialized .data section of the TSO firmware (contains an
 * ASCII-encoded version string among the words below).
 */
static const u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
};
6625
/* 5705 needs a special version of the TSO firmware.
 *
 * Layout of the 5705 TSO firmware image in NIC memory: version tag,
 * then the load addresses and lengths of the .text/.rodata/.data
 * sections, followed by the sizes of the zero-filled .sbss/.bss areas.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
/* "RELASE" is a historical typo; the misspelled name is kept so any
 * existing references elsewhere in this file continue to compile.
 */
#define TG3_TSO5_FW_RELASE_MINOR        0x2
/* Correctly-spelled alias for new code. */
#define TG3_TSO5_FW_RELEASE_MINOR       TG3_TSO5_FW_RELASE_MINOR
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
6641
/* .text section of the 5705-specific TSO firmware image loaded by
 * tg3_load_tso_firmware().  Opaque binary blob (the words appear to be
 * MIPS machine code for the on-chip RX cpu — TODO confirm against the
 * Broadcom firmware sources); do not edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6800
/* .rodata section of the 5705-specific TSO firmware image.  Opaque data
 * blob — the words appear to be packed ASCII string constants (e.g.
 * "MainCpuB"); do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6807
/* .data section of the 5705-specific TSO firmware image.  Opaque data
 * blob (contains what appears to be an ASCII version tag); do not edit
 * by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6812
6813 /* tp->lock is held. */
6814 static int tg3_load_tso_firmware(struct tg3 *tp)
6815 {
6816         struct fw_info info;
6817         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6818         int err, i;
6819
6820         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6821                 return 0;
6822
6823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6824                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6825                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6826                 info.text_data = &tg3Tso5FwText[0];
6827                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6828                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6829                 info.rodata_data = &tg3Tso5FwRodata[0];
6830                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6831                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6832                 info.data_data = &tg3Tso5FwData[0];
6833                 cpu_base = RX_CPU_BASE;
6834                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6835                 cpu_scratch_size = (info.text_len +
6836                                     info.rodata_len +
6837                                     info.data_len +
6838                                     TG3_TSO5_FW_SBSS_LEN +
6839                                     TG3_TSO5_FW_BSS_LEN);
6840         } else {
6841                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6842                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6843                 info.text_data = &tg3TsoFwText[0];
6844                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6845                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6846                 info.rodata_data = &tg3TsoFwRodata[0];
6847                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6848                 info.data_len = TG3_TSO_FW_DATA_LEN;
6849                 info.data_data = &tg3TsoFwData[0];
6850                 cpu_base = TX_CPU_BASE;
6851                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6852                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6853         }
6854
6855         err = tg3_load_firmware_cpu(tp, cpu_base,
6856                                     cpu_scratch_base, cpu_scratch_size,
6857                                     &info);
6858         if (err)
6859                 return err;
6860
6861         /* Now startup the cpu. */
6862         tw32(cpu_base + CPU_STATE, 0xffffffff);
6863         tw32_f(cpu_base + CPU_PC,    info.text_base);
6864
6865         for (i = 0; i < 5; i++) {
6866                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6867                         break;
6868                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6869                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6870                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6871                 udelay(1000);
6872         }
6873         if (i >= 5) {
6874                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6875                        "to set CPU PC, is %08x should be %08x\n",
6876                        tp->dev->name, tr32(cpu_base + CPU_PC),
6877                        info.text_base);
6878                 return -ENODEV;
6879         }
6880         tw32(cpu_base + CPU_STATE, 0xffffffff);
6881         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6882         return 0;
6883 }
6884
6885
6886 /* tp->lock is held. */
6887 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6888 {
6889         u32 addr_high, addr_low;
6890         int i;
6891
6892         addr_high = ((tp->dev->dev_addr[0] << 8) |
6893                      tp->dev->dev_addr[1]);
6894         addr_low = ((tp->dev->dev_addr[2] << 24) |
6895                     (tp->dev->dev_addr[3] << 16) |
6896                     (tp->dev->dev_addr[4] <<  8) |
6897                     (tp->dev->dev_addr[5] <<  0));
6898         for (i = 0; i < 4; i++) {
6899                 if (i == 1 && skip_mac_1)
6900                         continue;
6901                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6902                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6903         }
6904
6905         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6906             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6907                 for (i = 0; i < 12; i++) {
6908                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6909                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6910                 }
6911         }
6912
6913         addr_high = (tp->dev->dev_addr[0] +
6914                      tp->dev->dev_addr[1] +
6915                      tp->dev->dev_addr[2] +
6916                      tp->dev->dev_addr[3] +
6917                      tp->dev->dev_addr[4] +
6918                      tp->dev->dev_addr[5]) &
6919                 TX_BACKOFF_SEED_MASK;
6920         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6921 }
6922
6923 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6924 {
6925         struct tg3 *tp = netdev_priv(dev);
6926         struct sockaddr *addr = p;
6927         int err = 0, skip_mac_1 = 0;
6928
6929         if (!is_valid_ether_addr(addr->sa_data))
6930                 return -EINVAL;
6931
6932         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6933
6934         if (!netif_running(dev))
6935                 return 0;
6936
6937         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6938                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6939
6940                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6941                 addr0_low = tr32(MAC_ADDR_0_LOW);
6942                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6943                 addr1_low = tr32(MAC_ADDR_1_LOW);
6944
6945                 /* Skip MAC addr 1 if ASF is using it. */
6946                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6947                     !(addr1_high == 0 && addr1_low == 0))
6948                         skip_mac_1 = 1;
6949         }
6950         spin_lock_bh(&tp->lock);
6951         __tg3_set_mac_addr(tp, skip_mac_1);
6952         spin_unlock_bh(&tp->lock);
6953
6954         return err;
6955 }
6956
6957 /* tp->lock is held. */
6958 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6959                            dma_addr_t mapping, u32 maxlen_flags,
6960                            u32 nic_addr)
6961 {
6962         tg3_write_mem(tp,
6963                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6964                       ((u64) mapping >> 32));
6965         tg3_write_mem(tp,
6966                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6967                       ((u64) mapping & 0xffffffff));
6968         tg3_write_mem(tp,
6969                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6970                        maxlen_flags);
6971
6972         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6973                 tg3_write_mem(tp,
6974                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6975                               nic_addr);
6976 }
6977
6978 static void __tg3_set_rx_mode(struct net_device *);
6979 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6980 {
6981         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6982         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6983         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6984         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6985         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6986                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6987                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6988         }
6989         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6990         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6991         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6992                 u32 val = ec->stats_block_coalesce_usecs;
6993
6994                 if (!netif_carrier_ok(tp->dev))
6995                         val = 0;
6996
6997                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6998         }
6999 }
7000
7001 /* tp->lock is held. */
7002 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7003 {
7004         u32 val, rdmac_mode;
7005         int i, err, limit;
7006
7007         tg3_disable_ints(tp);
7008
7009         tg3_stop_fw(tp);
7010
7011         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7012
7013         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7014                 tg3_abort_hw(tp, 1);
7015         }
7016
7017         if (reset_phy &&
7018             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7019                 tg3_phy_reset(tp);
7020
7021         err = tg3_chip_reset(tp);
7022         if (err)
7023                 return err;
7024
7025         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7026
7027         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
7028             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
7029                 val = tr32(TG3_CPMU_CTRL);
7030                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7031                 tw32(TG3_CPMU_CTRL, val);
7032
7033                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7034                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7035                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7036                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7037
7038                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7039                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7040                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7041                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7042
7043                 val = tr32(TG3_CPMU_HST_ACC);
7044                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7045                 val |= CPMU_HST_ACC_MACCLK_6_25;
7046                 tw32(TG3_CPMU_HST_ACC, val);
7047         }
7048
7049         /* This works around an issue with Athlon chipsets on
7050          * B3 tigon3 silicon.  This bit has no effect on any
7051          * other revision.  But do not set this on PCI Express
7052          * chips and don't even touch the clocks if the CPMU is present.
7053          */
7054         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7055                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7056                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7057                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7058         }
7059
7060         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7061             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7062                 val = tr32(TG3PCI_PCISTATE);
7063                 val |= PCISTATE_RETRY_SAME_DMA;
7064                 tw32(TG3PCI_PCISTATE, val);
7065         }
7066
7067         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7068                 /* Allow reads and writes to the
7069                  * APE register and memory space.
7070                  */
7071                 val = tr32(TG3PCI_PCISTATE);
7072                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7073                        PCISTATE_ALLOW_APE_SHMEM_WR;
7074                 tw32(TG3PCI_PCISTATE, val);
7075         }
7076
7077         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7078                 /* Enable some hw fixes.  */
7079                 val = tr32(TG3PCI_MSI_DATA);
7080                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7081                 tw32(TG3PCI_MSI_DATA, val);
7082         }
7083
7084         /* Descriptor ring init may make accesses to the
7085          * NIC SRAM area to setup the TX descriptors, so we
7086          * can only do this after the hardware has been
7087          * successfully reset.
7088          */
7089         err = tg3_init_rings(tp);
7090         if (err)
7091                 return err;
7092
7093         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7094             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7095             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7096                 /* This value is determined during the probe time DMA
7097                  * engine test, tg3_test_dma.
7098                  */
7099                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7100         }
7101
7102         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7103                           GRC_MODE_4X_NIC_SEND_RINGS |
7104                           GRC_MODE_NO_TX_PHDR_CSUM |
7105                           GRC_MODE_NO_RX_PHDR_CSUM);
7106         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7107
7108         /* Pseudo-header checksum is done by hardware logic and not
7109          * the offload processers, so make the chip do the pseudo-
7110          * header checksums on receive.  For transmit it is more
7111          * convenient to do the pseudo-header checksum in software
7112          * as Linux does that on transmit for us in all cases.
7113          */
7114         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7115
7116         tw32(GRC_MODE,
7117              tp->grc_mode |
7118              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7119
7120         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
7121         val = tr32(GRC_MISC_CFG);
7122         val &= ~0xff;
7123         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7124         tw32(GRC_MISC_CFG, val);
7125
7126         /* Initialize MBUF/DESC pool. */
7127         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7128                 /* Do nothing.  */
7129         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7130                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7131                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7132                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7133                 else
7134                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7135                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7136                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7137         }
7138         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7139                 int fw_len;
7140
7141                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7142                           TG3_TSO5_FW_RODATA_LEN +
7143                           TG3_TSO5_FW_DATA_LEN +
7144                           TG3_TSO5_FW_SBSS_LEN +
7145                           TG3_TSO5_FW_BSS_LEN);
7146                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7147                 tw32(BUFMGR_MB_POOL_ADDR,
7148                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7149                 tw32(BUFMGR_MB_POOL_SIZE,
7150                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7151         }
7152
7153         if (tp->dev->mtu <= ETH_DATA_LEN) {
7154                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7155                      tp->bufmgr_config.mbuf_read_dma_low_water);
7156                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7157                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7158                 tw32(BUFMGR_MB_HIGH_WATER,
7159                      tp->bufmgr_config.mbuf_high_water);
7160         } else {
7161                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7162                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7163                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7164                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7165                 tw32(BUFMGR_MB_HIGH_WATER,
7166                      tp->bufmgr_config.mbuf_high_water_jumbo);
7167         }
7168         tw32(BUFMGR_DMA_LOW_WATER,
7169              tp->bufmgr_config.dma_low_water);
7170         tw32(BUFMGR_DMA_HIGH_WATER,
7171              tp->bufmgr_config.dma_high_water);
7172
7173         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7174         for (i = 0; i < 2000; i++) {
7175                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7176                         break;
7177                 udelay(10);
7178         }
7179         if (i >= 2000) {
7180                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7181                        tp->dev->name);
7182                 return -ENODEV;
7183         }
7184
7185         /* Setup replenish threshold. */
7186         val = tp->rx_pending / 8;
7187         if (val == 0)
7188                 val = 1;
7189         else if (val > tp->rx_std_max_post)
7190                 val = tp->rx_std_max_post;
7191         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7192                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7193                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7194
7195                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7196                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7197         }
7198
7199         tw32(RCVBDI_STD_THRESH, val);
7200
7201         /* Initialize TG3_BDINFO's at:
7202          *  RCVDBDI_STD_BD:     standard eth size rx ring
7203          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7204          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7205          *
7206          * like so:
7207          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7208          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7209          *                              ring attribute flags
7210          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7211          *
7212          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7213          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7214          *
7215          * The size of each ring is fixed in the firmware, but the location is
7216          * configurable.
7217          */
7218         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7219              ((u64) tp->rx_std_mapping >> 32));
7220         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7221              ((u64) tp->rx_std_mapping & 0xffffffff));
7222         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7223              NIC_SRAM_RX_BUFFER_DESC);
7224
7225         /* Don't even try to program the JUMBO/MINI buffer descriptor
7226          * configs on 5705.
7227          */
7228         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7229                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7230                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7231         } else {
7232                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7233                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7234
7235                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7236                      BDINFO_FLAGS_DISABLED);
7237
7238                 /* Setup replenish threshold. */
7239                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7240
7241                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7242                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7243                              ((u64) tp->rx_jumbo_mapping >> 32));
7244                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7245                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7246                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7247                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7248                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7249                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7250                 } else {
7251                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7252                              BDINFO_FLAGS_DISABLED);
7253                 }
7254
7255         }
7256
7257         /* There is only one send ring on 5705/5750, no need to explicitly
7258          * disable the others.
7259          */
7260         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7261                 /* Clear out send RCB ring in SRAM. */
7262                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7263                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7264                                       BDINFO_FLAGS_DISABLED);
7265         }
7266
7267         tp->tx_prod = 0;
7268         tp->tx_cons = 0;
7269         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7270         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7271
7272         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7273                        tp->tx_desc_mapping,
7274                        (TG3_TX_RING_SIZE <<
7275                         BDINFO_FLAGS_MAXLEN_SHIFT),
7276                        NIC_SRAM_TX_BUFFER_DESC);
7277
7278         /* There is only one receive return ring on 5705/5750, no need
7279          * to explicitly disable the others.
7280          */
7281         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7282                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7283                      i += TG3_BDINFO_SIZE) {
7284                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7285                                       BDINFO_FLAGS_DISABLED);
7286                 }
7287         }
7288
7289         tp->rx_rcb_ptr = 0;
7290         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7291
7292         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7293                        tp->rx_rcb_mapping,
7294                        (TG3_RX_RCB_RING_SIZE(tp) <<
7295                         BDINFO_FLAGS_MAXLEN_SHIFT),
7296                        0);
7297
7298         tp->rx_std_ptr = tp->rx_pending;
7299         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7300                      tp->rx_std_ptr);
7301
7302         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7303                                                 tp->rx_jumbo_pending : 0;
7304         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7305                      tp->rx_jumbo_ptr);
7306
7307         /* Initialize MAC address and backoff seed. */
7308         __tg3_set_mac_addr(tp, 0);
7309
7310         /* MTU + ethernet header + FCS + optional VLAN tag */
7311         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7312
7313         /* The slot time is changed by tg3_setup_phy if we
7314          * run at gigabit with half duplex.
7315          */
7316         tw32(MAC_TX_LENGTHS,
7317              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7318              (6 << TX_LENGTHS_IPG_SHIFT) |
7319              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7320
7321         /* Receive rules. */
7322         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7323         tw32(RCVLPC_CONFIG, 0x0181);
7324
7325         /* Calculate RDMAC_MODE setting early, we need it to determine
7326          * the RCVLPC_STATE_ENABLE mask.
7327          */
7328         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7329                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7330                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7331                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7332                       RDMAC_MODE_LNGREAD_ENAB);
7333
7334         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7335             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7336                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7337                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7338                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7339
7340         /* If statement applies to 5705 and 5750 PCI devices only */
7341         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7342              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7343             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7344                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7345                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7346                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7347                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7348                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7349                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7350                 }
7351         }
7352
7353         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7354                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7355
7356         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7357                 rdmac_mode |= (1 << 27);
7358
7359         /* Receive/send statistics. */
7360         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7361                 val = tr32(RCVLPC_STATS_ENABLE);
7362                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7363                 tw32(RCVLPC_STATS_ENABLE, val);
7364         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7365                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7366                 val = tr32(RCVLPC_STATS_ENABLE);
7367                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7368                 tw32(RCVLPC_STATS_ENABLE, val);
7369         } else {
7370                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7371         }
7372         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7373         tw32(SNDDATAI_STATSENAB, 0xffffff);
7374         tw32(SNDDATAI_STATSCTRL,
7375              (SNDDATAI_SCTRL_ENABLE |
7376               SNDDATAI_SCTRL_FASTUPD));
7377
7378         /* Setup host coalescing engine. */
7379         tw32(HOSTCC_MODE, 0);
7380         for (i = 0; i < 2000; i++) {
7381                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7382                         break;
7383                 udelay(10);
7384         }
7385
7386         __tg3_set_coalesce(tp, &tp->coal);
7387
7388         /* set status block DMA address */
7389         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7390              ((u64) tp->status_mapping >> 32));
7391         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7392              ((u64) tp->status_mapping & 0xffffffff));
7393
7394         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7395                 /* Status/statistics block address.  See tg3_timer,
7396                  * the tg3_periodic_fetch_stats call there, and
7397                  * tg3_get_stats to see how this works for 5705/5750 chips.
7398                  */
7399                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7400                      ((u64) tp->stats_mapping >> 32));
7401                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7402                      ((u64) tp->stats_mapping & 0xffffffff));
7403                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7404                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7405         }
7406
7407         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7408
7409         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7410         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7411         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7412                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7413
7414         /* Clear statistics/status block in chip, and status block in ram. */
7415         for (i = NIC_SRAM_STATS_BLK;
7416              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7417              i += sizeof(u32)) {
7418                 tg3_write_mem(tp, i, 0);
7419                 udelay(40);
7420         }
7421         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7422
7423         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7424                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7425                 /* reset to prevent losing 1st rx packet intermittently */
7426                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7427                 udelay(10);
7428         }
7429
7430         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7431                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7432         else
7433                 tp->mac_mode = 0;
7434         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7435                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7436         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7437             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7438             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7439                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7440         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7441         udelay(40);
7442
7443         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7444          * If TG3_FLG2_IS_NIC is zero, we should read the
7445          * register to preserve the GPIO settings for LOMs. The GPIOs,
7446          * whether used as inputs or outputs, are set by boot code after
7447          * reset.
7448          */
7449         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7450                 u32 gpio_mask;
7451
7452                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7453                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7454                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7455
7456                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7457                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7458                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7459
7460                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7461                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7462
7463                 tp->grc_local_ctrl &= ~gpio_mask;
7464                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7465
7466                 /* GPIO1 must be driven high for eeprom write protect */
7467                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7468                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7469                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7470         }
7471         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7472         udelay(100);
7473
7474         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7475         tp->last_tag = 0;
7476
7477         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7478                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7479                 udelay(40);
7480         }
7481
7482         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7483                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7484                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7485                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7486                WDMAC_MODE_LNGREAD_ENAB);
7487
7488         /* If statement applies to 5705 and 5750 PCI devices only */
7489         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7490              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7491             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7492                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7493                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7494                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7495                         /* nothing */
7496                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7497                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7498                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7499                         val |= WDMAC_MODE_RX_ACCEL;
7500                 }
7501         }
7502
7503         /* Enable host coalescing bug fix */
7504         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7505             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7506             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7507             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7508             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7509                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7510
7511         tw32_f(WDMAC_MODE, val);
7512         udelay(40);
7513
7514         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7515                 u16 pcix_cmd;
7516
7517                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7518                                      &pcix_cmd);
7519                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7520                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7521                         pcix_cmd |= PCI_X_CMD_READ_2K;
7522                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7523                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7524                         pcix_cmd |= PCI_X_CMD_READ_2K;
7525                 }
7526                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7527                                       pcix_cmd);
7528         }
7529
7530         tw32_f(RDMAC_MODE, rdmac_mode);
7531         udelay(40);
7532
7533         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7534         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7535                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7536
7537         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7538                 tw32(SNDDATAC_MODE,
7539                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7540         else
7541                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7542
7543         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7544         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7545         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7546         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7547         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7548                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7549         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7550         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7551
7552         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7553                 err = tg3_load_5701_a0_firmware_fix(tp);
7554                 if (err)
7555                         return err;
7556         }
7557
7558         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7559                 err = tg3_load_tso_firmware(tp);
7560                 if (err)
7561                         return err;
7562         }
7563
7564         tp->tx_mode = TX_MODE_ENABLE;
7565         tw32_f(MAC_TX_MODE, tp->tx_mode);
7566         udelay(100);
7567
7568         tp->rx_mode = RX_MODE_ENABLE;
7569         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7570             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7571             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7572             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7573                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7574
7575         tw32_f(MAC_RX_MODE, tp->rx_mode);
7576         udelay(10);
7577
7578         tw32(MAC_LED_CTRL, tp->led_ctrl);
7579
7580         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7581         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7582                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7583                 udelay(10);
7584         }
7585         tw32_f(MAC_RX_MODE, tp->rx_mode);
7586         udelay(10);
7587
7588         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7589                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7590                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7591                         /* Set drive transmission level to 1.2V  */
7592                         /* only if the signal pre-emphasis bit is not set  */
7593                         val = tr32(MAC_SERDES_CFG);
7594                         val &= 0xfffff000;
7595                         val |= 0x880;
7596                         tw32(MAC_SERDES_CFG, val);
7597                 }
7598                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7599                         tw32(MAC_SERDES_CFG, 0x616000);
7600         }
7601
7602         /* Prevent chip from dropping frames when flow control
7603          * is enabled.
7604          */
7605         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7606
7607         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7608             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7609                 /* Use hardware link auto-negotiation */
7610                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7611         }
7612
7613         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7614             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7615                 u32 tmp;
7616
7617                 tmp = tr32(SERDES_RX_CTRL);
7618                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7619                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7620                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7621                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7622         }
7623
7624         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7625                 if (tp->link_config.phy_is_low_power) {
7626                         tp->link_config.phy_is_low_power = 0;
7627                         tp->link_config.speed = tp->link_config.orig_speed;
7628                         tp->link_config.duplex = tp->link_config.orig_duplex;
7629                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7630                 }
7631
7632                 err = tg3_setup_phy(tp, 0);
7633                 if (err)
7634                         return err;
7635
7636                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7637                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7638                         u32 tmp;
7639
7640                         /* Clear CRC stats. */
7641                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7642                                 tg3_writephy(tp, MII_TG3_TEST1,
7643                                              tmp | MII_TG3_TEST1_CRC_EN);
7644                                 tg3_readphy(tp, 0x14, &tmp);
7645                         }
7646                 }
7647         }
7648
7649         __tg3_set_rx_mode(tp->dev);
7650
7651         /* Initialize receive rules. */
7652         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7653         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7654         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7655         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7656
7657         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7658             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7659                 limit = 8;
7660         else
7661                 limit = 16;
7662         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7663                 limit -= 4;
7664         switch (limit) {
7665         case 16:
7666                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7667         case 15:
7668                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7669         case 14:
7670                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7671         case 13:
7672                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7673         case 12:
7674                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7675         case 11:
7676                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7677         case 10:
7678                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7679         case 9:
7680                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7681         case 8:
7682                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7683         case 7:
7684                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7685         case 6:
7686                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7687         case 5:
7688                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7689         case 4:
7690                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7691         case 3:
7692                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7693         case 2:
7694         case 1:
7695
7696         default:
7697                 break;
7698         }
7699
7700         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7701                 /* Write our heartbeat update interval to APE. */
7702                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7703                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7704
7705         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7706
7707         return 0;
7708 }
7709
7710 /* Called at device open time to get the chip ready for
7711  * packet processing.  Invoked with tp->lock held.
7712  */
7713 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7714 {
7715         tg3_switch_clocks(tp);
7716
7717         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7718
7719         return tg3_reset_hw(tp, reset_phy);
7720 }
7721
/* Accumulate the 32-bit hardware counter register REG into the 64-bit
 * software counter *PSTAT (a low/high pair).  The register is read
 * exactly once; a wrap of the low word (detected as new low < addend)
 * carries one into the high word.  PSTAT is evaluated more than once,
 * so it must be a side-effect-free lvalue expression.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
7728
/* Fold the chip's 32-bit MAC/RCVLPC statistics registers into the
 * 64-bit software counters in tp->hw_stats (via TG3_STAT_ADD32).
 * Called once per second from tg3_timer() on 5705-plus chips; a
 * no-op while the carrier is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        if (!netif_carrier_ok(tp->dev))
                return;

        /* Transmit-side MAC statistics. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* Receive-side MAC statistics. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        /* Receive list placement (buffer-depletion/discard) counters. */
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7769
/* Driver watchdog, run from a self-re-arming kernel timer every
 * tp->timer_offset jiffies.  Its jobs:
 *   - work around the race-prone mailbox/status-block protocol when
 *     tagged interrupt status is not in use, and detect a WDMAC that
 *     has lost its enable bit (triggering a reset);
 *   - once per second (tp->timer_counter): fetch statistics on 5705+
 *     chips and poll/repair link state per the PHY access method;
 *   - once every 2 seconds (tp->asf_counter): send the ASF firmware
 *     heartbeat.
 * Takes tp->lock; when tp->irq_sync is set it does no work and only
 * re-arms itself.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* Interrupt synchronization in progress elsewhere -- skip all
         * work, but keep the timer alive.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block update pending: assert an
                         * interrupt so it gets serviced.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Otherwise kick the coalescing engine to DMA a
                         * fresh status block now.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        /* Write DMA enable bit unexpectedly clear:
                         * schedule a full chip reset from process
                         * context.  Drop the lock before scheduling.
                         */
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        /* Detect a PHY event either via the MI
                         * interrupt bit or the link-state-changed bit,
                         * depending on configuration.
                         */
                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Re-run PHY setup when the carrier state and
                         * the PCS sync / signal-detect status disagree.
                         */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Momentarily clear the port
                                         * mode bits, then restore them.
                                         */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
                    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
                        tg3_wait_for_event_ack(tp);

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

                        tg3_generate_fw_event(tp);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
7889
7890 static int tg3_request_irq(struct tg3 *tp)
7891 {
7892         irq_handler_t fn;
7893         unsigned long flags;
7894         struct net_device *dev = tp->dev;
7895
7896         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7897                 fn = tg3_msi;
7898                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7899                         fn = tg3_msi_1shot;
7900                 flags = IRQF_SAMPLE_RANDOM;
7901         } else {
7902                 fn = tg3_interrupt;
7903                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7904                         fn = tg3_interrupt_tagged;
7905                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7906         }
7907         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7908 }
7909
/* Verify that the NIC can deliver an interrupt in the current mode
 * (MSI or INTx).  The normal handler is temporarily replaced with
 * tg3_test_isr, a host-coalescing "now" event is forced, and the
 * interrupt mailbox is polled for up to ~50ms.  Returns 0 if an
 * interrupt was observed, -EIO if not, -ENODEV if the device is down,
 * or a negative errno from the request_irq() calls.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        /* Swap the production ISR for the test ISR. */
        free_irq(tp->pdev->irq, dev);

        err = request_irq(tp->pdev->irq, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
        if (err)
                return err;

        tp->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Kick the chip into generating a coalescing interrupt now. */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               HOSTCC_MODE_NOW);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
                                        TG3_64BIT_REG_LOW);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                /* Delivery counts as successful if the interrupt
                 * mailbox is non-zero or the PCI interrupt mask bit
                 * got set.  NOTE(review): tg3_test_isr is not visible
                 * in this chunk; presumably it produces one of these
                 * effects -- confirm.
                 */
                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                msleep(10);
        }

        tg3_disable_ints(tp);

        /* Restore the production ISR. */
        free_irq(tp->pdev->irq, dev);

        err = tg3_request_irq(tp);

        if (err)
                return err;

        if (intr_ok)
                return 0;

        return -EIO;
}
7963
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  On an -EIO test failure the driver falls
 * back to INTx and resets the chip; any other error is passed through.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test unless MSI is actually in use. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the original PCI command word (re-enables SERR). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        /* The IRQ was claimed above; drop it again if re-init failed. */
        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
8024
/* net_device open hook: power the chip up to D0, allocate the DMA
 * descriptor rings, claim the interrupt (preferring MSI when the chip
 * supports it), bring the hardware up, verify MSI delivery, and arm
 * the periodic driver timer before enabling interrupts.  Every failure
 * path unwinds whatever was set up before it.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        netif_carrier_off(tp->dev);

        err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                return err;

        tg3_full_lock(tp, 0);

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        napi_enable(&tp->napi);

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* With tagged status a 1-second timer suffices;
                 * otherwise the timer runs 10 times per second.
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                /* The ASF heartbeat counter ticks at half the timer rate
                 * (i.e. once every 2 seconds -- see tg3_timer).
                 */
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                napi_disable(&tp->napi);
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                /* Confirm MSI actually delivers; tg3_test_msi() falls
                 * back to INTx itself, so a failure here is fatal.
                 */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        napi_disable(&tp->napi);

                        return err;
                }

                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                u32 val = tr32(PCIE_TRANSACTION_CFG);

                                tw32(PCIE_TRANSACTION_CFG,
                                     val | PCIE_TRANS_CFG_1SHOT_MSI);
                        }
                }
        }

        tg3_phy_start(tp);

        tg3_full_lock(tp, 0);

        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
8160
#if 0
/* Debug-only, compiled out: dump a snapshot of the PCI state, every
 * major register block (MAC, send/receive data paths, buffer manager,
 * DMA engines, GRC), the SRAM control blocks, the host status and
 * statistics blocks, the mailboxes, and the NIC-side TX/RX descriptor
 * rings to the console.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;
        int i;

        pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
        printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
               val16, val32);

        /* MAC block */
        printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
               tr32(MAC_MODE), tr32(MAC_STATUS));
        printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
               tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
        printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
               tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
        printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

        /* Send data initiator control block */
        printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
               tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
        printk("       SNDDATAI_STATSCTRL[%08x]\n",
               tr32(SNDDATAI_STATSCTRL));

        /* Send data completion control block */
        printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

        /* Send BD ring selector block */
        printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
               tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

        /* Send BD initiator control block */
        printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
               tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

        /* Send BD completion control block */
        printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

        /* Receive list placement control block */
        printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
               tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
        printk("       RCVLPC_STATSCTRL[%08x]\n",
               tr32(RCVLPC_STATSCTRL));

        /* Receive data and receive BD initiator control block */
        printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
               tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

        /* Receive data completion control block */
        printk("DEBUG: RCVDCC_MODE[%08x]\n",
               tr32(RCVDCC_MODE));

        /* Receive BD initiator control block */
        printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
               tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

        /* Receive BD completion control block */
        printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
               tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

        /* Receive list selector control block */
        printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
               tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

        /* Mbuf cluster free block */
        printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
               tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

        /* Host coalescing control block */
        printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
               tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
        printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATS_BLK_NIC_ADDR));
        printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

        /* Memory arbiter control block */
        printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
               tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

        /* Buffer manager control block */
        printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
               tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
        printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
        printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
               "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_DMA_DESC_POOL_ADDR),
               tr32(BUFMGR_DMA_DESC_POOL_SIZE));

        /* Read DMA control block */
        printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
               tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

        /* Write DMA control block */
        printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
               tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

        /* DMA completion block */
        printk("DEBUG: DMAC_MODE[%08x]\n",
               tr32(DMAC_MODE));

        /* GRC block */
        printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
               tr32(GRC_MODE), tr32(GRC_MISC_CFG));
        printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
               tr32(GRC_LOCAL_CTRL));

        /* TG3_BDINFOs */
        printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_JUMBO_BD + 0x0),
               tr32(RCVDBDI_JUMBO_BD + 0x4),
               tr32(RCVDBDI_JUMBO_BD + 0x8),
               tr32(RCVDBDI_JUMBO_BD + 0xc));
        printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_STD_BD + 0x0),
               tr32(RCVDBDI_STD_BD + 0x4),
               tr32(RCVDBDI_STD_BD + 0x8),
               tr32(RCVDBDI_STD_BD + 0xc));
        printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_MINI_BD + 0x0),
               tr32(RCVDBDI_MINI_BD + 0x4),
               tr32(RCVDBDI_MINI_BD + 0x8),
               tr32(RCVDBDI_MINI_BD + 0xc));

        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
        printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4, val32_5);

        /* SW status block */
        printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
               tp->hw_status->status,
               tp->hw_status->status_tag,
               tp->hw_status->rx_jumbo_consumer,
               tp->hw_status->rx_consumer,
               tp->hw_status->rx_mini_consumer,
               tp->hw_status->idx[0].rx_producer,
               tp->hw_status->idx[0].tx_consumer);

        /* SW statistics block */
        printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
               ((u32 *)tp->hw_stats)[0],
               ((u32 *)tp->hw_stats)[1],
               ((u32 *)tp->hw_stats)[2],
               ((u32 *)tp->hw_stats)[3]);

        /* Mailboxes */
        printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

        /* NIC side send descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long txd;

                txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
                        + (i * sizeof(struct tg3_tx_buffer_desc));
                printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(txd + 0x0), readl(txd + 0x4),
                       readl(txd + 0x8), readl(txd + 0xc));
        }

        /* NIC side RX descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }

        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }
}
#endif
8388
8389 static struct net_device_stats *tg3_get_stats(struct net_device *);
8390 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8391
/* net_device close hook: stop NAPI, pending reset work, the TX queue
 * and the driver timer; halt and power down the hardware; release the
 * IRQ (and MSI vector) and the DMA memory; and snapshot the final
 * statistics so they remain cumulative across close/open cycles.
 * Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        napi_disable(&tp->napi);
        cancel_work_sync(&tp->reset_task);

        netif_stop_queue(dev);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
#if 0
        tg3_dump_state(tp);
#endif

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Save the counters now; tg3_get_stats()/tg3_get_estats()
         * return these saved copies once hw_stats is freed below.
         */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
8435
/* Read a 64-bit hardware statistics counter as an unsigned long.
 * On 32-bit kernels only the low word fits, so just that is returned;
 * on 64-bit kernels the full high:low value is reassembled.
 */
static inline unsigned long get_stat64(tg3_stat64_t *val)
{
#if (BITS_PER_LONG == 32)
        return val->low;
#else
        return ((u64)val->high << 32) | ((u64)val->low);
#endif
}
8447
/* Reassemble a 64-bit hardware statistics counter from its high and
 * low 32-bit words.
 */
static inline u64 get_estat64(tg3_stat64_t *val)
{
        u64 hi = val->high;
        u64 lo = val->low;

        return (hi << 32) | lo;
}
8452
/* Return the cumulative receive CRC error count.
 *
 * On 5700/5701 copper (non-SerDes) chips the counter lives in the PHY:
 * enable the PHY CRC counter via MII_TG3_TEST1, read it from MII
 * register 0x14, and accumulate into tp->phy_crc_errors (the
 * accumulation suggests the hardware counter clears on read --
 * NOTE(review): confirm against the PHY datasheet).  All other
 * configurations use the MAC rx_fcs_errors statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
8478
/* Add a hardware counter to the snapshot saved at close time (see
 * tg3_close) so reported values stay cumulative across open/close.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_estat64(&hw_stats->member)

/* Fill tp->estats with cumulative ethtool statistics and return it.
 * If the hardware statistics block is not mapped (device closed),
 * return the snapshot saved at the last close instead.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        return estats;
}
8570
/* net_device get_stats hook: build tp->net_stats by mapping the
 * hardware statistics counters onto the standard net_device_stats
 * fields, adding the snapshot saved at the last close so values are
 * cumulative.  Returns the saved snapshot unchanged when the hardware
 * statistics block is not mapped (device closed).
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        struct net_device_stats *stats = &tp->net_stats;
        struct net_device_stats *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY on some chips -- see
         * calc_crc_errors().
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        return stats;
}
8630
/* Bitwise, LSB-first (reflected) CRC-32 over buf[0..len-1], using the
 * IEEE 802.3 polynomial (0xedb88320), 0xffffffff initial value and a
 * final complement -- the same CRC the Ethernet multicast hash uses.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
        u32 crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];

                for (bit = 0; bit < 8; bit++) {
                        if (crc & 1)
                                crc = (crc >> 1) ^ 0xedb88320;
                        else
                                crc >>= 1;
                }
        }

        return ~crc;
}
8655
8656 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8657 {
8658         /* accept or reject all multicast frames */
8659         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8660         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8661         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8662         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8663 }
8664
/* Program the receive MAC filtering -- promiscuous bit, VLAN tag
 * retention and the 128-bit multicast hash -- to match dev->flags and
 * the device multicast list.  Callers serialize through tg3_full_lock()
 * (see tg3_set_rx_mode).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
#if TG3_VLAN_TAG_USED
        if (!tp->vlgrp &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
        /* By definition, VLAN is disabled always in this
         * case.
         */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi (tp, 1);
        } else if (dev->mc_count < 1) {
                /* Reject all multicast. */
                tg3_set_multi (tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                unsigned int i;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Hash = low 7 bits of the inverted address
                         * CRC; bits 6:5 select one of the four hash
                         * registers, bits 4:0 the bit within it.
                         */
                        crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch the RX mode register if something changed. */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
8728
/* ndo_set_multicast_list entry point: apply the current RX filtering
 * policy under the full driver lock.  A no-op while the interface is
 * down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8740
8741 #define TG3_REGDUMP_LEN         (32 * 1024)
8742
8743 static int tg3_get_regs_len(struct net_device *dev)
8744 {
8745         return TG3_REGDUMP_LEN;
8746 }
8747
/* ethtool get_regs: snapshot device registers into the caller's 32KB
 * buffer.  Each GET_REG32_* group repositions the write pointer to the
 * register's own offset within the dump, so the output mirrors the
 * chip's register map; uncovered gaps stay zeroed by the memset.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	/* Don't touch the chip while it is powered down; hand back the
	 * zeroed buffer instead.
	 */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only dumped when flash is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8820
8821 static int tg3_get_eeprom_len(struct net_device *dev)
8822 {
8823         struct tg3 *tp = netdev_priv(dev);
8824
8825         return tp->nvram_size;
8826 }
8827
8828 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8829 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8830 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8831
/* ethtool get_eeprom: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is read in 4-byte words, so an
 * unaligned head or tail is fetched as a whole word and only the
 * requested bytes copied out.  eeprom->len is advanced as bytes land,
 * so a partial count is reported if a read fails mid-transfer.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	/* NVRAM is not accessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Copy only the requested bytes out of the aligned word. */
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report how much was transferred before failing. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8891
8892 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8893
/* ethtool set_eeprom: write eeprom->len bytes at eeprom->offset.
 * NVRAM programming is word-based, so when the span is not 4-byte
 * aligned at either end, the bordering words are read first and merged
 * with the caller's data in a temporary buffer (read-modify-write)
 * before the block write.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	/* NVRAM is not accessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Build an aligned image: preserved border words plus
		 * the caller's payload at its byte offset.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8951
/* ethtool get_settings.  When phylib drives the PHY (USE_PHYLIB), defer
 * entirely to the attached phy_device; otherwise report the link state
 * the driver tracks itself in tp->link_config.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		/* Nothing to report until the PHY is connected. */
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper parts add 10/100 + TP; SerDes parts are fibre only. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Active speed/duplex are only meaningful while up. */
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
8992
/* ethtool set_settings.  With phylib in charge, hand the request to the
 * phy_device.  Otherwise validate the requested combination against the
 * hardware type (SerDes links only do 1000 Mb/s; copper cannot force
 * 1000; 10/100-only parts cannot do 1000 at all), record it in
 * link_config, and renegotiate if the interface is running.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Autoneg decides the result; no forced speed/duplex. */
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Keep the orig_* shadow copies in sync with the new settings. */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
9048
9049 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9050 {
9051         struct tg3 *tp = netdev_priv(dev);
9052
9053         strcpy(info->driver, DRV_MODULE_NAME);
9054         strcpy(info->version, DRV_MODULE_VERSION);
9055         strcpy(info->fw_version, tp->fw_ver);
9056         strcpy(info->bus_info, pci_name(tp->pdev));
9057 }
9058
9059 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9060 {
9061         struct tg3 *tp = netdev_priv(dev);
9062
9063         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9064             device_can_wakeup(&tp->pdev->dev))
9065                 wol->supported = WAKE_MAGIC;
9066         else
9067                 wol->supported = 0;
9068         wol->wolopts = 0;
9069         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9070                 wol->wolopts = WAKE_MAGIC;
9071         memset(&wol->sopass, 0, sizeof(wol->sopass));
9072 }
9073
9074 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9075 {
9076         struct tg3 *tp = netdev_priv(dev);
9077         struct device *dp = &tp->pdev->dev;
9078
9079         if (wol->wolopts & ~WAKE_MAGIC)
9080                 return -EINVAL;
9081         if ((wol->wolopts & WAKE_MAGIC) &&
9082             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9083                 return -EINVAL;
9084
9085         spin_lock_bh(&tp->lock);
9086         if (wol->wolopts & WAKE_MAGIC) {
9087                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9088                 device_set_wakeup_enable(dp, true);
9089         } else {
9090                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9091                 device_set_wakeup_enable(dp, false);
9092         }
9093         spin_unlock_bh(&tp->lock);
9094
9095         return 0;
9096 }
9097
9098 static u32 tg3_get_msglevel(struct net_device *dev)
9099 {
9100         struct tg3 *tp = netdev_priv(dev);
9101         return tp->msg_enable;
9102 }
9103
9104 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9105 {
9106         struct tg3 *tp = netdev_priv(dev);
9107         tp->msg_enable = value;
9108 }
9109
/* ethtool set_tso: toggle TCP segmentation offload.  Enabling is
 * refused on chips without TSO support.  On HW_TSO_2 chips other than
 * the 5906, IPv6 TSO follows the main TSO setting, and a few ASIC
 * revisions also get TSO over ECN-marked connections.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;	/* disabling on a non-TSO chip is a no-op */
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			/* 5761, 5784 (non-AX revs) and 5785 also do ECN. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
9133
/* ethtool nway_reset: restart link autonegotiation.  Not applicable to
 * SERDES PHYs.  Delegates to phylib when in use; otherwise pokes the
 * MII BMCR register directly.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice back-to-back; this
		 * looks like a deliberate dummy read to obtain a fresh
		 * value from the PHY — confirm before "simplifying".
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			/* Restart only when aneg is already enabled or
			 * parallel detection is active.
			 */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
9167
9168 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9169 {
9170         struct tg3 *tp = netdev_priv(dev);
9171
9172         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9173         ering->rx_mini_max_pending = 0;
9174         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9175                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9176         else
9177                 ering->rx_jumbo_max_pending = 0;
9178
9179         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9180
9181         ering->rx_pending = tp->rx_pending;
9182         ering->rx_mini_pending = 0;
9183         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9184                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9185         else
9186                 ering->rx_jumbo_pending = 0;
9187
9188         ering->tx_pending = tp->tx_pending;
9189 }
9190
/* ethtool set_ringparam: validate and apply new RX/TX ring sizes.  The
 * TX ring must hold more than MAX_SKB_FRAGS entries (3x that on
 * TSO_BUG chips).  If the device is running, it is quiesced, the rings
 * reconfigured, and the hardware restarted.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* MAX_RXPEND_64 chips are clamped to 63 pending RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Full reset so the chip picks up the new ring sizes. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
9234
9235 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9236 {
9237         struct tg3 *tp = netdev_priv(dev);
9238
9239         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9240
9241         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9242                 epause->rx_pause = 1;
9243         else
9244                 epause->rx_pause = 0;
9245
9246         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9247                 epause->tx_pause = 1;
9248         else
9249                 epause->tx_pause = 0;
9250 }
9251
/* ethtool set_pauseparam: configure flow control.  On the phylib path,
 * an autoneg request is translated into 802.3 Pause/Asym_Pause
 * advertisement bits and autonegotiation restarted if they changed;
 * with autoneg off, flow control is forced immediately.  On the legacy
 * path the flags are updated and, if running, the chip is halted and
 * reinitialized to apply them.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			/* Map the rx/tx pause request onto Pause /
			 * Asym_Pause advertisement bits.
			 */
			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				/* Only restart autoneg when the pause
				 * advertisement actually changed.
				 */
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			/* Autoneg off: force the requested flow control. */
			if (epause->rx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

		if (netif_running(dev)) {
			/* Apply by fully resetting and reinitializing. */
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
9344
9345 static u32 tg3_get_rx_csum(struct net_device *dev)
9346 {
9347         struct tg3 *tp = netdev_priv(dev);
9348         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9349 }
9350
9351 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9352 {
9353         struct tg3 *tp = netdev_priv(dev);
9354
9355         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9356                 if (data != 0)
9357                         return -EINVAL;
9358                 return 0;
9359         }
9360
9361         spin_lock_bh(&tp->lock);
9362         if (data)
9363                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9364         else
9365                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9366         spin_unlock_bh(&tp->lock);
9367
9368         return 0;
9369 }
9370
9371 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9372 {
9373         struct tg3 *tp = netdev_priv(dev);
9374
9375         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9376                 if (data != 0)
9377                         return -EINVAL;
9378                 return 0;
9379         }
9380
9381         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9382             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9383             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9384             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9385             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9386                 ethtool_op_set_tx_ipv6_csum(dev, data);
9387         else
9388                 ethtool_op_set_tx_csum(dev, data);
9389
9390         return 0;
9391 }
9392
9393 static int tg3_get_sset_count (struct net_device *dev, int sset)
9394 {
9395         switch (sset) {
9396         case ETH_SS_TEST:
9397                 return TG3_NUM_TEST;
9398         case ETH_SS_STATS:
9399                 return TG3_NUM_STATS;
9400         default:
9401                 return -EOPNOTSUPP;
9402         }
9403 }
9404
9405 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9406 {
9407         switch (stringset) {
9408         case ETH_SS_STATS:
9409                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9410                 break;
9411         case ETH_SS_TEST:
9412                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9413                 break;
9414         default:
9415                 WARN_ON(1);     /* we need a WARN() */
9416                 break;
9417         }
9418 }
9419
9420 static int tg3_phys_id(struct net_device *dev, u32 data)
9421 {
9422         struct tg3 *tp = netdev_priv(dev);
9423         int i;
9424
9425         if (!netif_running(tp->dev))
9426                 return -EAGAIN;
9427
9428         if (data == 0)
9429                 data = UINT_MAX / 2;
9430
9431         for (i = 0; i < (data * 2); i++) {
9432                 if ((i % 2) == 0)
9433                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9434                                            LED_CTRL_1000MBPS_ON |
9435                                            LED_CTRL_100MBPS_ON |
9436                                            LED_CTRL_10MBPS_ON |
9437                                            LED_CTRL_TRAFFIC_OVERRIDE |
9438                                            LED_CTRL_TRAFFIC_BLINK |
9439                                            LED_CTRL_TRAFFIC_LED);
9440
9441                 else
9442                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9443                                            LED_CTRL_TRAFFIC_OVERRIDE);
9444
9445                 if (msleep_interruptible(500))
9446                         break;
9447         }
9448         tw32(MAC_LED_CTRL, tp->led_ctrl);
9449         return 0;
9450 }
9451
9452 static void tg3_get_ethtool_stats (struct net_device *dev,
9453                                    struct ethtool_stats *estats, u64 *tmp_stats)
9454 {
9455         struct tg3 *tp = netdev_priv(dev);
9456         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9457 }
9458
9459 #define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* Ethtool self-test: validate the NVRAM contents.
 *
 * The magic word at offset 0 selects the image format and how many bytes
 * to read.  The image is read into a temporary buffer and verified with
 * the scheme matching its format:
 *   - legacy images: two CRC32 checksums (bootstrap block at 0x10 and
 *     manufacturing block at 0xfc);
 *   - firmware selfboot images: an 8-bit additive checksum over the image;
 *   - hardware selfboot images: per-byte parity bits.
 *
 * Returns 0 on success, -EIO on a read failure or checksum/parity
 * mismatch, -ENOMEM if the temporary buffer cannot be allocated.
 * Unrecognized selfboot formats/revisions are skipped (return 0) rather
 * than reported as failures.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic;
        __le32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return -EIO;

        /* Choose the number of bytes to read based on the image format. */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        default:
                                /* Unknown revision: nothing we can check. */
                                return 0;
                        }
                } else
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
                        break;
        }
        /* i < size means the read loop above broke out early. */
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = swab32(le32_to_cpu(buf[0]));
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                /* All bytes of a valid image sum to zero modulo 256. */
                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                /* Bytes 0 and 8 each carry 7 parity bits;
                                 * note i is bumped past the parity byte.
                                 */
                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        else if (i == 16) {
                                int l;
                                u8 msk;

                                /* Byte 16 carries 6 parity bits and byte 17
                                 * carries 8 more.
                                 */
                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        /* Each data byte together with its parity bit must
                         * have odd overall parity; both branches below are
                         * the even-parity (failure) cases.
                         */
                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if(csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                 goto out;

        err = 0;

out:
        kfree(buf);
        return err;
}
9599
9600 #define TG3_SERDES_TIMEOUT_SEC  2
9601 #define TG3_COPPER_TIMEOUT_SEC  6
9602
9603 static int tg3_test_link(struct tg3 *tp)
9604 {
9605         int i, max;
9606
9607         if (!netif_running(tp->dev))
9608                 return -ENODEV;
9609
9610         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9611                 max = TG3_SERDES_TIMEOUT_SEC;
9612         else
9613                 max = TG3_COPPER_TIMEOUT_SEC;
9614
9615         for (i = 0; i < max; i++) {
9616                 if (netif_carrier_ok(tp->dev))
9617                         return 0;
9618
9619                 if (msleep_interruptible(1000))
9620                         break;
9621         }
9622
9623         return -EIO;
9624 }
9625
/* Only test the commonly used registers */
/* Ethtool self-test: verify read-only and read/write register bits.
 *
 * For every reg_tbl[] entry that applies to this chip, the register is
 * written with all-zeros and then with all-ones (masked), and the value
 * read back is checked against read_mask (bits that must be preserved)
 * and write_mask (bits that must take the written value).  The original
 * register content is saved first and restored afterwards.
 *
 * Returns 0 on success, -EIO on the first mismatch (logged when
 * netif_msg_hw is enabled).
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;
/* flags bits select which chip families an entry applies to. */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                /* An offset of 0xffff terminates the table. */
                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                is_5705 = 1;
                if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that do not apply to this chip family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                /* Restore the original content before moving on. */
                tw32(offset, save_val);
        }

        return 0;

out:
        /* Failure path: report the offending offset and restore it. */
        if (netif_msg_hw(tp))
                printk(KERN_ERR PFX "Register test failed at offset %x\n",
                       offset);
        tw32(offset, save_val);
        return -EIO;
}
9846
9847 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9848 {
9849         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9850         int i;
9851         u32 j;
9852
9853         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9854                 for (j = 0; j < len; j += 4) {
9855                         u32 val;
9856
9857                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9858                         tg3_read_mem(tp, offset + j, &val);
9859                         if (val != test_pattern[i])
9860                                 return -EIO;
9861                 }
9862         }
9863         return 0;
9864 }
9865
9866 static int tg3_test_memory(struct tg3 *tp)
9867 {
9868         static struct mem_entry {
9869                 u32 offset;
9870                 u32 len;
9871         } mem_tbl_570x[] = {
9872                 { 0x00000000, 0x00b50},
9873                 { 0x00002000, 0x1c000},
9874                 { 0xffffffff, 0x00000}
9875         }, mem_tbl_5705[] = {
9876                 { 0x00000100, 0x0000c},
9877                 { 0x00000200, 0x00008},
9878                 { 0x00004000, 0x00800},
9879                 { 0x00006000, 0x01000},
9880                 { 0x00008000, 0x02000},
9881                 { 0x00010000, 0x0e000},
9882                 { 0xffffffff, 0x00000}
9883         }, mem_tbl_5755[] = {
9884                 { 0x00000200, 0x00008},
9885                 { 0x00004000, 0x00800},
9886                 { 0x00006000, 0x00800},
9887                 { 0x00008000, 0x02000},
9888                 { 0x00010000, 0x0c000},
9889                 { 0xffffffff, 0x00000}
9890         }, mem_tbl_5906[] = {
9891                 { 0x00000200, 0x00008},
9892                 { 0x00004000, 0x00400},
9893                 { 0x00006000, 0x00400},
9894                 { 0x00008000, 0x01000},
9895                 { 0x00010000, 0x01000},
9896                 { 0xffffffff, 0x00000}
9897         };
9898         struct mem_entry *mem_tbl;
9899         int err = 0;
9900         int i;
9901
9902         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9903                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9904                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9905                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9906                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9907                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9908                         mem_tbl = mem_tbl_5755;
9909                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9910                         mem_tbl = mem_tbl_5906;
9911                 else
9912                         mem_tbl = mem_tbl_5705;
9913         } else
9914                 mem_tbl = mem_tbl_570x;
9915
9916         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9917                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9918                     mem_tbl[i].len)) != 0)
9919                         break;
9920         }
9921
9922         return err;
9923 }
9924
#define TG3_MAC_LOOPBACK        0
#define TG3_PHY_LOOPBACK        1

/* Run one loopback test in the requested mode.
 *
 * Configures either internal MAC loopback or PHY loopback, transmits a
 * single 1514-byte frame whose payload is a known byte pattern, then
 * polls the status block for the TX completion and the looped-back RX
 * packet and verifies the received length and payload.
 *
 * Returns 0 on success, -EIO on any mismatch or timeout, -EINVAL for an
 * unknown mode, -ENOMEM if the test skb cannot be allocated.  The RX
 * buffer is left in place for tg3_free_rings() to reclaim.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
        u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 desc_idx;
        struct sk_buff *skb, *rx_skb;
        u8 *tx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;

        if (loopback_mode == TG3_MAC_LOOPBACK) {
                /* HW errata - mac loopback fails in some cases on 5780.
                 * Normal traffic and PHY loopback are not affected by
                 * errata.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
                        return 0;

                mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
                           MAC_MODE_PORT_INT_LPBACK;
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        mac_mode |= MAC_MODE_LINK_POLARITY;
                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                tw32(MAC_MODE, mac_mode);
        } else if (loopback_mode == TG3_PHY_LOOPBACK) {
                u32 val;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        u32 phytest;

                        /* Via the EPHY shadow register, clear bit 0x20 of
                         * PHY register 0x1b before entering loopback.
                         */
                        if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
                                u32 phy;

                                tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                             phytest | MII_TG3_EPHY_SHADOW_EN);
                                if (!tg3_readphy(tp, 0x1b, &phy))
                                        tg3_writephy(tp, 0x1b, phy & ~0x20);
                                tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
                        }
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
                } else
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

                tg3_phy_toggle_automdix(tp, 0);

                tg3_writephy(tp, MII_BMCR, val);
                udelay(40);

                mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                } else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                /* reset to prevent losing 1st rx packet intermittently */
                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                        tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                        udelay(10);
                        tw32_f(MAC_RX_MODE, tp->rx_mode);
                }
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
                                mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                }
                tw32(MAC_MODE, mac_mode);
        }
        else
                return -EINVAL;

        err = -EIO;

        /* Build the test frame: our MAC address, 8 zero bytes, then an
         * incrementing byte pattern that the receive side can verify.
         */
        tx_len = 1514;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + 4);

        for (i = 14; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
             HOSTCC_MODE_NOW);

        udelay(10);

        /* Snapshot the RX producer so we can detect the looped frame. */
        rx_start_idx = tp->hw_status->idx[0].rx_producer;

        num_pkts = 0;

        tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

        tp->tx_prod++;
        num_pkts++;

        tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
                     tp->tx_prod);
        tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

        udelay(10);

        /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 25; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       HOSTCC_MODE_NOW);

                udelay(10);

                tx_idx = tp->hw_status->idx[0].tx_consumer;
                rx_idx = tp->hw_status->idx[0].rx_producer;
                if ((tx_idx == tp->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        if (tx_idx != tp->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        /* Validate the RX descriptor: must come from the standard ring
         * and carry no error flags (odd-nibble MII errors excepted).
         */
        desc = &tp->rx_rcb[rx_start_idx];
        desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
        opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
        if (opaque_key != RXD_OPAQUE_RING_STD)
                goto out;

        if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
            (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                goto out;

        /* Received length excludes the 4-byte FCS. */
        rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
        if (rx_len != tx_len)
                goto out;

        rx_skb = tp->rx_std_buffers[desc_idx].skb;

        map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
        pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

        /* Verify the payload pattern written above. */
        for (i = 14; i < tx_len; i++) {
                if (*(rx_skb->data + i) != (u8) (i & 0xff))
                        goto out;
        }
        err = 0;

        /* tg3_free_rings will unmap and free the rx_skb */
out:
        return err;
}
10095
#define TG3_MAC_LOOPBACK_FAILED         1
#define TG3_PHY_LOOPBACK_FAILED         2
#define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
                                         TG3_PHY_LOOPBACK_FAILED)

/* Ethtool self-test: run MAC and (when applicable) PHY loopback.
 *
 * Resets the hardware, then runs tg3_run_loopback() in MAC loopback
 * mode and, unless the device is serdes or managed by phylib, in PHY
 * loopback mode as well.  On 5784/5761/5785 the CPMU mutex is taken and
 * link-based power management disabled around the MAC loopback test.
 *
 * Returns a bitmask of TG3_*_LOOPBACK_FAILED bits; 0 means both tests
 * passed.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
        int err = 0;
        u32 cpmuctrl = 0;

        if (!netif_running(tp->dev))
                return TG3_LOOPBACK_FAILED;

        err = tg3_reset_hw(tp, 1);
        if (err)
                return TG3_LOOPBACK_FAILED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                int i;
                u32 status;

                tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

                /* Wait for up to 40 microseconds to acquire lock. */
                for (i = 0; i < 4; i++) {
                        status = tr32(TG3_CPMU_MUTEX_GNT);
                        if (status == CPMU_MUTEX_GNT_DRIVER)
                                break;
                        udelay(10);
                }

                /* Could not acquire the CPMU mutex: fail both tests. */
                if (status != CPMU_MUTEX_GNT_DRIVER)
                        return TG3_LOOPBACK_FAILED;

                /* Turn off link-based power management. */
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                tw32(TG3_CPMU_CTRL,
                     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
                                  CPMU_CTRL_LINK_AWARE_MODE));
        }

        if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
                err |= TG3_MAC_LOOPBACK_FAILED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                /* Restore the saved CPMU control value. */
                tw32(TG3_CPMU_CTRL, cpmuctrl);

                /* Release the mutex */
                tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
        }

        /* PHY loopback is skipped for serdes and phylib-managed PHYs. */
        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
                if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
                        err |= TG3_PHY_LOOPBACK_FAILED;
        }

        return err;
}
10159
/* Ethtool .self_test entry point.
 *
 * Runs the online tests (NVRAM, link) unconditionally, and the offline
 * tests (registers, memory, loopback, interrupt) when the caller
 * requests ETH_TEST_FL_OFFLINE.  Offline testing halts the chip, so the
 * device is stopped under tg3_full_lock() and restarted afterwards.
 *
 * Result slots in data[]: [0] nvram, [1] link, [2] registers,
 * [3] memory, [4] loopback bitmask, [5] interrupt.  etest->flags gets
 * ETH_TEST_FL_FAILED if anything failed.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Temporarily bring a powered-down chip to D0 for the tests. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D0);

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        if (tg3_test_link(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, err2 = 0, irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_phy_stop(tp);
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                /* Halt the chip and its CPUs before poking registers. */
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }
                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }
                /* data[4] holds the loopback failure bitmask directly. */
                if ((data[4] = tg3_test_loopback(tp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* The interrupt test needs the lock dropped. */
                tg3_full_unlock(tp);

                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[5] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Reset and, if the device was up, restart the hardware. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                        err2 = tg3_restart_hw(tp, 1);
                        if (!err2)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);

                if (irq_sync && !err2)
                        tg3_phy_start(tp);
        }
        /* Return a low-power chip to its previous power state. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D3hot);

}
10237
/* Net-device ioctl handler for MII requests.
 *
 * When phylib manages the PHY (TG3_FLG3_USE_PHYLIB), the request is
 * delegated to phy_mii_ioctl().  Otherwise SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG are serviced directly via tg3_readphy()/tg3_writephy()
 * under tp->lock.  Serdes devices have no MII PHY and fall through to
 * -EOPNOTSUPP, as do unrecognized commands.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                /* Defer to phylib once the PHY has been attached. */
                if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                        return -EAGAIN;
                return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
        }

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = PHY_ADDR;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */

                /* MII access is unavailable while powered down. */
                if (tp->link_config.phy_is_low_power)
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&tp->lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (tp->link_config.phy_is_low_power)
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&tp->lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
10295
#if TG3_VLAN_TAG_USED
/* Attach (or detach, grp == NULL) the VLAN group.  The device is
 * quiesced around the change so the RX mode update is race-free.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev))
                tg3_netif_stop(tp);

        tg3_full_lock(tp, 0);

        tp->vlgrp = grp;

        /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
        __tg3_set_rx_mode(dev);

        if (netif_running(dev))
                tg3_netif_start(tp);

        tg3_full_unlock(tp);
}
#endif
10317
10318 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10319 {
10320         struct tg3 *tp = netdev_priv(dev);
10321
10322         memcpy(ec, &tp->coal, sizeof(*ec));
10323         return 0;
10324 }
10325
/* ethtool set_coalesce: validate the requested interrupt coalescing
 * parameters against the hardware limits and apply them.  5705+ parts
 * lack the irq-tick and statistics coalescing knobs, so their maxima
 * stay at zero and any nonzero request for them is rejected.
 * Returns 0 on success, -EINVAL for out-of-range parameters.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
        u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

        /* Only pre-5705 hardware supports the extra coalescing knobs. */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
                max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
                max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
                min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
        }

        /* Reject anything outside the hardware limits. */
        if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
            (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
            (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
            (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
            (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
            (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
            (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
            (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
            (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
            (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
                return -EINVAL;

        /* No rx interrupts will be generated if both are zero */
        if ((ec->rx_coalesce_usecs == 0) &&
            (ec->rx_max_coalesced_frames == 0))
                return -EINVAL;

        /* No tx interrupts will be generated if both are zero */
        if ((ec->tx_coalesce_usecs == 0) &&
            (ec->tx_max_coalesced_frames == 0))
                return -EINVAL;

        /* Only copy relevant parameters, ignore all others. */
        tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
        tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
        tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
        tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
        tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
        tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
        tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
        tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
        tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

        /* Program the hardware immediately if the interface is up. */
        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_coalesce(tp, &tp->coal);
                tg3_full_unlock(tp);
        }
        return 0;
}
10379
/* ethtool operations table for the tg3 driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .get_rx_csum            = tg3_get_rx_csum,
        .set_rx_csum            = tg3_set_rx_csum,
        .set_tx_csum            = tg3_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = tg3_set_tso,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .phys_id                = tg3_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};
10412
/* Determine the size of a SEEPROM part by probing for address wrap-around.
 * Sets tp->nvram_size; on any read failure the default EEPROM_CHIP_SIZE
 * is left in place.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
        u32 cursize, val, magic;

        /* Assume the maximum size until proven smaller. */
        tp->nvram_size = EEPROM_CHIP_SIZE;

        if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return;

        /* Bail out unless offset 0 holds one of the known signatures. */
        if ((magic != TG3_EEPROM_MAGIC) &&
            ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
            ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
                return;

        /*
         * Size the chip by reading offsets at increasing powers of two.
         * When we encounter our validation signature, we know the addressing
         * has wrapped around, and thus have our chip size.
         */
        cursize = 0x10;

        while (cursize < tp->nvram_size) {
                if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
                        return;

                if (val == magic)
                        break;

                cursize <<= 1;
        }

        tp->nvram_size = cursize;
}
10446
/* Determine the NVRAM size and store it in tp->nvram_size.
 * Non-selfboot images encode the size (in KB) in the upper 16 bits of
 * the word at offset 0xf0; selfboot images fall back to probing, and
 * anything else defaults to 512KB.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
        u32 val;

        if (tg3_nvram_read_swab(tp, 0, &val) != 0)
                return;

        /* Selfboot format */
        if (val != TG3_EEPROM_MAGIC) {
                tg3_get_eeprom_size(tp);
                return;
        }

        if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
                if (val != 0) {
                        /* Upper 16 bits hold the size in kilobytes. */
                        tp->nvram_size = (val >> 16) * 1024;
                        return;
                }
        }
        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
10468
/* Probe NVRAM_CFG1 and record the flash vendor, page size, and
 * buffered/flash flags for 5750/5780-class (and older) devices.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tp->tg3_flags2 |= TG3_FLG2_FLASH;
        }
        else {
                /* No flash interface: force non-bypass EEPROM access. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                        case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_ATMEL_EEPROM:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ST:
                                tp->nvram_jedecnum = JEDEC_ST;
                                tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_SAIFUN:
                                tp->nvram_jedecnum = JEDEC_SAIFUN;
                                tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_SST_SMALL:
                        case FLASH_VENDOR_SST_LARGE:
                                tp->nvram_jedecnum = JEDEC_SST;
                                tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                                break;
                }
        }
        else {
                /* Pre-5750 parts: assume a buffered Atmel AT45DB0X1B. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
        }
}
10521
/* Probe NVRAM_CFG1 on 5752 devices: record vendor, flags, and page size.
 * Bit 27 indicates TPM protection of the NVRAM.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
        }

        if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
                /* Flash parts encode the page size in NVRAM_CFG1. */
                switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
                        case FLASH_5752PAGE_SIZE_256:
                                tp->nvram_pagesize = 256;
                                break;
                        case FLASH_5752PAGE_SIZE_512:
                                tp->nvram_pagesize = 512;
                                break;
                        case FLASH_5752PAGE_SIZE_1K:
                                tp->nvram_pagesize = 1024;
                                break;
                        case FLASH_5752PAGE_SIZE_2K:
                                tp->nvram_pagesize = 2048;
                                break;
                        case FLASH_5752PAGE_SIZE_4K:
                                tp->nvram_pagesize = 4096;
                                break;
                        case FLASH_5752PAGE_SIZE_264:
                                tp->nvram_pagesize = 264;
                                break;
                }
        }
        else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
10582
/* Probe NVRAM_CFG1 on 5755 devices: record vendor, flags, page size,
 * and the NVRAM size, which depends on the part and on whether the
 * TPM protection bit (27) reserves part of the device.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                case FLASH_5755VENDOR_ATMEL_FLASH_5:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        /* Usable size is reduced when TPM protection is on. */
                        if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                            nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                                tp->nvram_size = (protect ? 0x3e200 :
                                                  TG3_NVRAM_SIZE_512KB);
                        else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                                tp->nvram_size = (protect ? 0x1f200 :
                                                  TG3_NVRAM_SIZE_256KB);
                        else
                                tp->nvram_size = (protect ? 0x1f200 :
                                                  TG3_NVRAM_SIZE_128KB);
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                                tp->nvram_size = (protect ?
                                                  TG3_NVRAM_SIZE_64KB :
                                                  TG3_NVRAM_SIZE_128KB);
                        else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                                tp->nvram_size = (protect ?
                                                  TG3_NVRAM_SIZE_64KB :
                                                  TG3_NVRAM_SIZE_256KB);
                        else
                                tp->nvram_size = (protect ?
                                                  TG3_NVRAM_SIZE_128KB :
                                                  TG3_NVRAM_SIZE_512KB);
                        break;
        }
}
10638
/* Probe NVRAM_CFG1 on 5787 (and 5784/5785) devices: record vendor,
 * flags, and page size.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                        /* EEPROM parts: force non-bypass access mode. */
                        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                        tw32(NVRAM_CFG1, nvcfg1);
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }
}
10676
/* Probe NVRAM_CFG1 on 5761 devices: record vendor, flags, and page size.
 * When the TPM protection bit (27) is set the accessible size is read
 * from NVRAM_ADDR_LOCKOUT; otherwise it is derived from the part ID.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        /* These parts use linear addressing directly. */
                        tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
                        tp->nvram_pagesize = 256;
                        break;
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }

        if (protect) {
                /* Protected: hardware reports the accessible size. */
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                switch (nvcfg1) {
                        case FLASH_5761VENDOR_ATMEL_ADB161D:
                        case FLASH_5761VENDOR_ATMEL_MDB161D:
                        case FLASH_5761VENDOR_ST_A_M45PE16:
                        case FLASH_5761VENDOR_ST_M_M45PE16:
                                tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB081D:
                        case FLASH_5761VENDOR_ATMEL_MDB081D:
                        case FLASH_5761VENDOR_ST_A_M45PE80:
                        case FLASH_5761VENDOR_ST_M_M45PE80:
                                tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB041D:
                        case FLASH_5761VENDOR_ATMEL_MDB041D:
                        case FLASH_5761VENDOR_ST_A_M45PE40:
                        case FLASH_5761VENDOR_ST_M_M45PE40:
                                tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB021D:
                        case FLASH_5761VENDOR_ATMEL_MDB021D:
                        case FLASH_5761VENDOR_ST_A_M45PE20:
                        case FLASH_5761VENDOR_ST_M_M45PE20:
                                tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                                break;
                }
        }
}
10751
10752 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10753 {
10754         tp->nvram_jedecnum = JEDEC_ATMEL;
10755         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10756         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10757 }
10758
10759 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10760 static void __devinit tg3_nvram_init(struct tg3 *tp)
10761 {
10762         tw32_f(GRC_EEPROM_ADDR,
10763              (EEPROM_ADDR_FSM_RESET |
10764               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10765                EEPROM_ADDR_CLKPERD_SHIFT)));
10766
10767         msleep(1);
10768
10769         /* Enable seeprom accesses. */
10770         tw32_f(GRC_LOCAL_CTRL,
10771              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10772         udelay(100);
10773
10774         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10775             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10776                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10777
10778                 if (tg3_nvram_lock(tp)) {
10779                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10780                                "tg3_nvram_init failed.\n", tp->dev->name);
10781                         return;
10782                 }
10783                 tg3_enable_nvram_access(tp);
10784
10785                 tp->nvram_size = 0;
10786
10787                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10788                         tg3_get_5752_nvram_info(tp);
10789                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10790                         tg3_get_5755_nvram_info(tp);
10791                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10792                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10793                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10794                         tg3_get_5787_nvram_info(tp);
10795                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10796                         tg3_get_5761_nvram_info(tp);
10797                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10798                         tg3_get_5906_nvram_info(tp);
10799                 else
10800                         tg3_get_nvram_info(tp);
10801
10802                 if (tp->nvram_size == 0)
10803                         tg3_get_nvram_size(tp);
10804
10805                 tg3_disable_nvram_access(tp);
10806                 tg3_nvram_unlock(tp);
10807
10808         } else {
10809                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10810
10811                 tg3_get_eeprom_size(tp);
10812         }
10813 }
10814
/* Read one 32-bit word from the EEPROM via the GRC EEPROM interface.
 * @offset must be dword-aligned and within EEPROM_ADDR_ADDR_MASK.
 * Returns 0 on success (word in @*val), -EINVAL for a bad offset,
 * -EBUSY if the access does not complete within ~1s of polling.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK ||
            (offset % 4) != 0)
                return -EINVAL;

        /* Preserve unrelated bits; clear address, device id, and read flag. */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        /* Poll for completion, up to 1000 x 1ms. */
        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        *val = tr32(GRC_EEPROM_DATA);
        return 0;
}
10848
#define NVRAM_CMD_TIMEOUT 10000

/* Issue @nvram_cmd to the NVRAM controller and poll for completion.
 * Returns 0 once NVRAM_CMD_DONE is observed, -EBUSY after
 * NVRAM_CMD_TIMEOUT polls (10us apart, ~100ms total).
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
        int i;

        tw32(NVRAM_CMD, nvram_cmd);
        for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
                udelay(10);
                if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
                        /* Extra settle time after DONE is reported. */
                        udelay(10);
                        break;
                }
        }
        if (i == NVRAM_CMD_TIMEOUT) {
                return -EBUSY;
        }
        return 0;
}
10868
10869 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10870 {
10871         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10872             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10873             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10874            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10875             (tp->nvram_jedecnum == JEDEC_ATMEL))
10876
10877                 addr = ((addr / tp->nvram_pagesize) <<
10878                         ATMEL_AT45DB0X1B_PAGE_POS) +
10879                        (addr % tp->nvram_pagesize);
10880
10881         return addr;
10882 }
10883
10884 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10885 {
10886         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10887             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10888             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10889            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10890             (tp->nvram_jedecnum == JEDEC_ATMEL))
10891
10892                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10893                         tp->nvram_pagesize) +
10894                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10895
10896         return addr;
10897 }
10898
/* Read one 32-bit word from NVRAM at @offset (byte-swapped into @*val).
 * Falls back to the GRC EEPROM interface when no NVRAM is present.
 * Returns 0 on success, negative errno on bad offset, lock failure, or
 * command timeout.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        /* Translate the linear offset into the part's native addressing. */
        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = swab32(tr32(NVRAM_RDDATA));

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
10930
10931 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10932 {
10933         u32 v;
10934         int res = tg3_nvram_read(tp, offset, &v);
10935         if (!res)
10936                 *val = cpu_to_le32(v);
10937         return res;
10938 }
10939
10940 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10941 {
10942         int err;
10943         u32 tmp;
10944
10945         err = tg3_nvram_read(tp, offset, &tmp);
10946         *val = swab32(tmp);
10947         return err;
10948 }
10949
/* Write @len bytes from @buf to the EEPROM starting at @offset via the
 * GRC EEPROM interface, one dword per transaction.  @offset and @len
 * are assumed dword-aligned by the caller.  Returns 0 on success,
 * -EBUSY if any word fails to complete within ~1s of polling.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __le32 data;

                addr = offset + i;

                /* memcpy avoids alignment assumptions about buf. */
                memcpy(&data, buf + i, 4);

                tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

                /* Ack any previous completion before starting the write. */
                val = tr32(GRC_EEPROM_ADDR);
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll for completion, up to 1000 x 1ms. */
                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
10992
/* Write @len bytes from @buf into NVRAM at @offset for flash parts with
 * no internal write buffer: each affected page is read out whole,
 * patched in memory, erased, and rewritten in its entirety.
 * offset and length are dword aligned.  Returns 0 or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;	/* assumes pagesize is a power of two */
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one complete flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Physical address of the page containing the current offset. */
		phy_addr = offset & ~pagemask;

		/* Read the whole page first so the bytes we do not
		 * overwrite survive the erase below.
		 */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the patched page back one dword at a time,
		 * tagging the first and last dwords for the controller.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always drop write enable (WRDI) on the way out, even on error.
	 * A failure of this final command is deliberately ignored.
	 */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
11089
/* Write @len bytes from @buf into NVRAM at @offset, one dword at a time,
 * for parts the controller can buffer internally (and for plain EEPROMs).
 * offset and length are dword aligned.  Returns 0 or a negative errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		/* Byte offset of this dword within its flash page. */
		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Tag page/transfer boundaries: FIRST at the start of a
		 * page or of the transfer, LAST at the end of a page or
		 * of the transfer.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST-manufactured flash on chips other than those listed
		 * below needs an explicit write-enable command before the
		 * first write of each page.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
11144
11145 /* offset and length are dword aligned */
11146 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11147 {
11148         int ret;
11149
11150         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11151                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11152                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11153                 udelay(40);
11154         }
11155
11156         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11157                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11158         }
11159         else {
11160                 u32 grc_mode;
11161
11162                 ret = tg3_nvram_lock(tp);
11163                 if (ret)
11164                         return ret;
11165
11166                 tg3_enable_nvram_access(tp);
11167                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11168                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11169                         tw32(NVRAM_WRITE1, 0x406);
11170
11171                 grc_mode = tr32(GRC_MODE);
11172                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11173
11174                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11175                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11176
11177                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11178                                 buf);
11179                 }
11180                 else {
11181                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11182                                 buf);
11183                 }
11184
11185                 grc_mode = tr32(GRC_MODE);
11186                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11187
11188                 tg3_disable_nvram_access(tp);
11189                 tg3_nvram_unlock(tp);
11190         }
11191
11192         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11193                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11194                 udelay(40);
11195         }
11196
11197         return ret;
11198 }
11199
/* Maps a (subsystem vendor, subsystem device) pair to the PHY ID known
 * to be present on that board.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;	/* 0 => board treated as serdes (no copper PHY ID) */
};
11204
/* Hardcoded PHY IDs for known boards; consulted by lookup_by_subsys()
 * as a last resort when the PHY ID cannot be read from the chip or the
 * eeprom (see tg3_phy_probe()).
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
11242
11243 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11244 {
11245         int i;
11246
11247         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11248                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11249                      tp->pdev->subsystem_vendor) &&
11250                     (subsys_id_to_phy_id[i].subsys_devid ==
11251                      tp->pdev->subsystem_device))
11252                         return &subsys_id_to_phy_id[i];
11253         }
11254         return NULL;
11255 }
11256
/* Pull the hardware configuration the bootcode left in NIC SRAM (or, on
 * 5906, in the VCPU shadow register) into tp: PHY id, serdes vs copper,
 * LED mode, WOL capability, ASF/APE enables and assorted flags.  Also
 * forces the device into D0 and enables the memory arbiter so SRAM can
 * actually be read.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906 takes its configuration from the VCPU shadow register
	 * and returns early, skipping the SRAM parsing below.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		return;
	}

	/* Everything below is gated on the bootcode having written a
	 * valid signature into SRAM.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the two SRAM fields into the
		 * same layout tg3_phy_probe() builds from MII_PHYSID1/2.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards override to PHY_2 LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* These Arima boards set the WP bit but must not
			 * be write protected.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}

		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}
}
11459
11460 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11461 {
11462         int i;
11463         u32 val;
11464
11465         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11466         tw32(OTP_CTRL, cmd);
11467
11468         /* Wait for up to 1 ms for command to execute. */
11469         for (i = 0; i < 100; i++) {
11470                 val = tr32(OTP_STATUS);
11471                 if (val & OTP_STATUS_CMD_DONE)
11472                         break;
11473                 udelay(10);
11474         }
11475
11476         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11477 }
11478
11479 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11480  * configuration is a 32-bit value that straddles the alignment boundary.
11481  * We do two 32-bit reads and then shift and merge the results.
11482  */
11483 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11484 {
11485         u32 bhalf_otp, thalf_otp;
11486
11487         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11488
11489         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11490                 return 0;
11491
11492         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11493
11494         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11495                 return 0;
11496
11497         thalf_otp = tr32(OTP_READ_DATA);
11498
11499         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11500
11501         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11502                 return 0;
11503
11504         bhalf_otp = tr32(OTP_READ_DATA);
11505
11506         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11507 }
11508
/* Determine the PHY ID and copper-vs-serdes configuration, then perform
 * initial PHY setup.  Prefers the ID read over MII; falls back to the
 * value tg3_get_eeprom_hw_cfg() extracted, and finally to the hardcoded
 * subsystem-ID table.  Returns 0 or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* When phylib manages the PHY, hand everything off to it. */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firwmare access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* For copper PHYs not owned by ASF/APE firmware, make sure a
	 * full autoneg advertisement is programmed.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* Read BMSR twice (first read clears latched state); if
		 * link is already up, leave the PHY alone.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): on BCM5401 the DSP init runs a second time here
	 * right after the block above; unclear whether this double load
	 * is deliberate or a leftover duplicate -- confirm before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11639
11640 static void __devinit tg3_read_partno(struct tg3 *tp)
11641 {
11642         unsigned char vpd_data[256];
11643         unsigned int i;
11644         u32 magic;
11645
11646         if (tg3_nvram_read_swab(tp, 0x0, &magic))
11647                 goto out_not_found;
11648
11649         if (magic == TG3_EEPROM_MAGIC) {
11650                 for (i = 0; i < 256; i += 4) {
11651                         u32 tmp;
11652
11653                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11654                                 goto out_not_found;
11655
11656                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11657                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11658                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11659                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11660                 }
11661         } else {
11662                 int vpd_cap;
11663
11664                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11665                 for (i = 0; i < 256; i += 4) {
11666                         u32 tmp, j = 0;
11667                         __le32 v;
11668                         u16 tmp16;
11669
11670                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11671                                               i);
11672                         while (j++ < 100) {
11673                                 pci_read_config_word(tp->pdev, vpd_cap +
11674                                                      PCI_VPD_ADDR, &tmp16);
11675                                 if (tmp16 & 0x8000)
11676                                         break;
11677                                 msleep(1);
11678                         }
11679                         if (!(tmp16 & 0x8000))
11680                                 goto out_not_found;
11681
11682                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11683                                               &tmp);
11684                         v = cpu_to_le32(tmp);
11685                         memcpy(&vpd_data[i], &v, 4);
11686                 }
11687         }
11688
11689         /* Now parse and find the part number. */
11690         for (i = 0; i < 254; ) {
11691                 unsigned char val = vpd_data[i];
11692                 unsigned int block_end;
11693
11694                 if (val == 0x82 || val == 0x91) {
11695                         i = (i + 3 +
11696                              (vpd_data[i + 1] +
11697                               (vpd_data[i + 2] << 8)));
11698                         continue;
11699                 }
11700
11701                 if (val != 0x90)
11702                         goto out_not_found;
11703
11704                 block_end = (i + 3 +
11705                              (vpd_data[i + 1] +
11706                               (vpd_data[i + 2] << 8)));
11707                 i += 3;
11708
11709                 if (block_end > 256)
11710                         goto out_not_found;
11711
11712                 while (i < (block_end - 2)) {
11713                         if (vpd_data[i + 0] == 'P' &&
11714                             vpd_data[i + 1] == 'N') {
11715                                 int partno_len = vpd_data[i + 2];
11716
11717                                 i += 3;
11718                                 if (partno_len > 24 || (partno_len + i) > 256)
11719                                         goto out_not_found;
11720
11721                                 memcpy(tp->board_part_number,
11722                                        &vpd_data[i], partno_len);
11723
11724                                 /* Success. */
11725                                 return;
11726                         }
11727                         i += 3 + vpd_data[i + 2];
11728                 }
11729
11730                 /* Part number not found. */
11731                 goto out_not_found;
11732         }
11733
11734 out_not_found:
11735         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11736                 strcpy(tp->board_part_number, "BCM95906");
11737         else
11738                 strcpy(tp->board_part_number, "none");
11739 }
11740
11741 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11742 {
11743         u32 val;
11744
11745         if (tg3_nvram_read_swab(tp, offset, &val) ||
11746             (val & 0xfc000000) != 0x0c000000 ||
11747             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11748             val != 0)
11749                 return 0;
11750
11751         return 1;
11752 }
11753
/* Read the bootcode firmware version string out of NVRAM into
 * tp->fw_ver, and — when ASF is enabled without APE management —
 * append the ASF firmware version as well.  Silently returns on any
 * NVRAM read failure or layout mismatch, leaving tp->fw_ver as-is.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Only proceed if NVRAM carries the expected magic signature. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc: firmware image offset; word 0x4: image load address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	/* Word at image offset + 8 points at the version string. */
	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* Copy up to 16 bytes of the bootcode version string, 4 at a
	 * time, converting the pointer from load-address space to an
	 * NVRAM offset.
	 */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* Append the ASF version only when ASF firmware is in use and
	 * the APE is not managing the device.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Walk the NVRAM directory looking for the ASF INI entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 devices use a fixed ASF load address; later devices
	 * store it in the word preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	/* Directory entry word +4 holds the ASF image offset; validate
	 * the image and fetch its version-string pointer at +8.
	 */
	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	/* Append ", <asf version>" after the bootcode version. */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Truncate the final copy so we never write past
		 * TG3_VER_SIZE.
		 */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination regardless of how we got here. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11837
11838 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11839
11840 static int __devinit tg3_get_invariants(struct tg3 *tp)
11841 {
11842         static struct pci_device_id write_reorder_chipsets[] = {
11843                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11844                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11845                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11846                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11847                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11848                              PCI_DEVICE_ID_VIA_8385_0) },
11849                 { },
11850         };
11851         u32 misc_ctrl_reg;
11852         u32 cacheline_sz_reg;
11853         u32 pci_state_reg, grc_misc_cfg;
11854         u32 val;
11855         u16 pci_cmd;
11856         int err, pcie_cap;
11857
11858         /* Force memory write invalidate off.  If we leave it on,
11859          * then on 5700_BX chips we have to enable a workaround.
11860          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11861          * to match the cacheline size.  The Broadcom driver have this
11862          * workaround but turns MWI off all the times so never uses
11863          * it.  This seems to suggest that the workaround is insufficient.
11864          */
11865         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11866         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11867         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11868
11869         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11870          * has the register indirect write enable bit set before
11871          * we try to access any of the MMIO registers.  It is also
11872          * critical that the PCI-X hw workaround situation is decided
11873          * before that as well.
11874          */
11875         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11876                               &misc_ctrl_reg);
11877
11878         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11879                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11880         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11881                 u32 prod_id_asic_rev;
11882
11883                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11884                                       &prod_id_asic_rev);
11885                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11886         }
11887
11888         /* Wrong chip ID in 5752 A0. This code can be removed later
11889          * as A0 is not in production.
11890          */
11891         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11892                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11893
11894         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11895          * we need to disable memory and use config. cycles
11896          * only to access all registers. The 5702/03 chips
11897          * can mistakenly decode the special cycles from the
11898          * ICH chipsets as memory write cycles, causing corruption
11899          * of register and memory space. Only certain ICH bridges
11900          * will drive special cycles with non-zero data during the
11901          * address phase which can fall within the 5703's address
11902          * range. This is not an ICH bug as the PCI spec allows
11903          * non-zero address during special cycles. However, only
11904          * these ICH bridges are known to drive non-zero addresses
11905          * during special cycles.
11906          *
11907          * Since special cycles do not cross PCI bridges, we only
11908          * enable this workaround if the 5703 is on the secondary
11909          * bus of these ICH bridges.
11910          */
11911         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11912             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11913                 static struct tg3_dev_id {
11914                         u32     vendor;
11915                         u32     device;
11916                         u32     rev;
11917                 } ich_chipsets[] = {
11918                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11919                           PCI_ANY_ID },
11920                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11921                           PCI_ANY_ID },
11922                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11923                           0xa },
11924                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11925                           PCI_ANY_ID },
11926                         { },
11927                 };
11928                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11929                 struct pci_dev *bridge = NULL;
11930
11931                 while (pci_id->vendor != 0) {
11932                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11933                                                 bridge);
11934                         if (!bridge) {
11935                                 pci_id++;
11936                                 continue;
11937                         }
11938                         if (pci_id->rev != PCI_ANY_ID) {
11939                                 if (bridge->revision > pci_id->rev)
11940                                         continue;
11941                         }
11942                         if (bridge->subordinate &&
11943                             (bridge->subordinate->number ==
11944                              tp->pdev->bus->number)) {
11945
11946                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11947                                 pci_dev_put(bridge);
11948                                 break;
11949                         }
11950                 }
11951         }
11952
11953         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11954                 static struct tg3_dev_id {
11955                         u32     vendor;
11956                         u32     device;
11957                 } bridge_chipsets[] = {
11958                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11959                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11960                         { },
11961                 };
11962                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11963                 struct pci_dev *bridge = NULL;
11964
11965                 while (pci_id->vendor != 0) {
11966                         bridge = pci_get_device(pci_id->vendor,
11967                                                 pci_id->device,
11968                                                 bridge);
11969                         if (!bridge) {
11970                                 pci_id++;
11971                                 continue;
11972                         }
11973                         if (bridge->subordinate &&
11974                             (bridge->subordinate->number <=
11975                              tp->pdev->bus->number) &&
11976                             (bridge->subordinate->subordinate >=
11977                              tp->pdev->bus->number)) {
11978                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11979                                 pci_dev_put(bridge);
11980                                 break;
11981                         }
11982                 }
11983         }
11984
11985         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11986          * DMA addresses > 40-bit. This bridge may have other additional
11987          * 57xx devices behind it in some 4-port NIC designs for example.
11988          * Any tg3 device found behind the bridge will also need the 40-bit
11989          * DMA workaround.
11990          */
11991         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11992             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11993                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11994                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11995                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11996         }
11997         else {
11998                 struct pci_dev *bridge = NULL;
11999
12000                 do {
12001                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12002                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12003                                                 bridge);
12004                         if (bridge && bridge->subordinate &&
12005                             (bridge->subordinate->number <=
12006                              tp->pdev->bus->number) &&
12007                             (bridge->subordinate->subordinate >=
12008                              tp->pdev->bus->number)) {
12009                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12010                                 pci_dev_put(bridge);
12011                                 break;
12012                         }
12013                 } while (bridge);
12014         }
12015
12016         /* Initialize misc host control in PCI block. */
12017         tp->misc_host_ctrl |= (misc_ctrl_reg &
12018                                MISC_HOST_CTRL_CHIPREV);
12019         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12020                                tp->misc_host_ctrl);
12021
12022         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12023                               &cacheline_sz_reg);
12024
12025         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12026         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12027         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12028         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12029
12030         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12031             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12032                 tp->pdev_peer = tg3_find_peer(tp);
12033
12034         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12035             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12036             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12037             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12038             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12039             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12040             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12041             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12042             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12043                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12044
12045         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12046             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12047                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12048
12049         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12050                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12051                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12052                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12053                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12054                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12055                      tp->pdev_peer == tp->pdev))
12056                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12057
12058                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12059                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12060                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12061                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12062                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12063                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12064                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12065                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12066                 } else {
12067                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12068                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12069                                 ASIC_REV_5750 &&
12070                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12071                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12072                 }
12073         }
12074
12075         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12076              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12077                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12078
12079         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12080         if (pcie_cap != 0) {
12081                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12082
12083                 pcie_set_readrq(tp->pdev, 4096);
12084
12085                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12086                         u16 lnkctl;
12087
12088                         pci_read_config_word(tp->pdev,
12089                                              pcie_cap + PCI_EXP_LNKCTL,
12090                                              &lnkctl);
12091                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12092                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12093                 }
12094         }
12095
12096         /* If we have an AMD 762 or VIA K8T800 chipset, write
12097          * reordering to the mailbox registers done by the host
12098          * controller can cause major troubles.  We read back from
12099          * every mailbox register write to force the writes to be
12100          * posted to the chip in order.
12101          */
12102         if (pci_dev_present(write_reorder_chipsets) &&
12103             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12104                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12105
12106         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12107             tp->pci_lat_timer < 64) {
12108                 tp->pci_lat_timer = 64;
12109
12110                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12111                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12112                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12113                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12114
12115                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12116                                        cacheline_sz_reg);
12117         }
12118
12119         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12120             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12121                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12122                 if (!tp->pcix_cap) {
12123                         printk(KERN_ERR PFX "Cannot find PCI-X "
12124                                             "capability, aborting.\n");
12125                         return -EIO;
12126                 }
12127         }
12128
12129         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12130                               &pci_state_reg);
12131
12132         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12133                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12134
12135                 /* If this is a 5700 BX chipset, and we are in PCI-X
12136                  * mode, enable register write workaround.
12137                  *
12138                  * The workaround is to use indirect register accesses
12139                  * for all chip writes not to mailbox registers.
12140                  */
12141                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12142                         u32 pm_reg;
12143
12144                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12145
12146                         /* The chip can have it's power management PCI config
12147                          * space registers clobbered due to this bug.
12148                          * So explicitly force the chip into D0 here.
12149                          */
12150                         pci_read_config_dword(tp->pdev,
12151                                               tp->pm_cap + PCI_PM_CTRL,
12152                                               &pm_reg);
12153                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12154                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12155                         pci_write_config_dword(tp->pdev,
12156                                                tp->pm_cap + PCI_PM_CTRL,
12157                                                pm_reg);
12158
12159                         /* Also, force SERR#/PERR# in PCI command. */
12160                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12161                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12162                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12163                 }
12164         }
12165
12166         /* 5700 BX chips need to have their TX producer index mailboxes
12167          * written twice to workaround a bug.
12168          */
12169         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12170                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12171
12172         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12173                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12174         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12175                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12176
12177         /* Chip-specific fixup from Broadcom driver */
12178         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12179             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12180                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12181                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12182         }
12183
12184         /* Default fast path register access methods */
12185         tp->read32 = tg3_read32;
12186         tp->write32 = tg3_write32;
12187         tp->read32_mbox = tg3_read32;
12188         tp->write32_mbox = tg3_write32;
12189         tp->write32_tx_mbox = tg3_write32;
12190         tp->write32_rx_mbox = tg3_write32;
12191
12192         /* Various workaround register access methods */
12193         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12194                 tp->write32 = tg3_write_indirect_reg32;
12195         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12196                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12197                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12198                 /*
12199                  * Back to back register writes can cause problems on these
12200                  * chips, the workaround is to read back all reg writes
12201                  * except those to mailbox regs.
12202                  *
12203                  * See tg3_write_indirect_reg32().
12204                  */
12205                 tp->write32 = tg3_write_flush_reg32;
12206         }
12207
12208
12209         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12210             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12211                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12212                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12213                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12214         }
12215
12216         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12217                 tp->read32 = tg3_read_indirect_reg32;
12218                 tp->write32 = tg3_write_indirect_reg32;
12219                 tp->read32_mbox = tg3_read_indirect_mbox;
12220                 tp->write32_mbox = tg3_write_indirect_mbox;
12221                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12222                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12223
12224                 iounmap(tp->regs);
12225                 tp->regs = NULL;
12226
12227                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12228                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12229                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12230         }
12231         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12232                 tp->read32_mbox = tg3_read32_mbox_5906;
12233                 tp->write32_mbox = tg3_write32_mbox_5906;
12234                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12235                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12236         }
12237
12238         if (tp->write32 == tg3_write_indirect_reg32 ||
12239             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12240              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12241               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12242                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12243
12244         /* Get eeprom hw config before calling tg3_set_power_state().
12245          * In particular, the TG3_FLG2_IS_NIC flag must be
12246          * determined before calling tg3_set_power_state() so that
12247          * we know whether or not to switch out of Vaux power.
12248          * When the flag is set, it means that GPIO1 is used for eeprom
12249          * write protect and also implies that it is a LOM where GPIOs
12250          * are not used to switch power.
12251          */
12252         tg3_get_eeprom_hw_cfg(tp);
12253
12254         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12255                 /* Allow reads and writes to the
12256                  * APE register and memory space.
12257                  */
12258                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12259                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12260                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12261                                        pci_state_reg);
12262         }
12263
12264         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12265             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12266             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12267                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12268
12269                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12270                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12271                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12272                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12273                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12274         }
12275
12276         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12277          * GPIO1 driven high will bring 5700's external PHY out of reset.
12278          * It is also used as eeprom write protect on LOMs.
12279          */
12280         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12281         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12282             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12283                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12284                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12285         /* Unused GPIO3 must be driven as output on 5752 because there
12286          * are no pull-up resistors on unused GPIO pins.
12287          */
12288         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12289                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12290
12291         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12292                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12293
12294         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12295                 /* Turn off the debug UART. */
12296                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12297                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12298                         /* Keep VMain power. */
12299                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12300                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12301         }
12302
12303         /* Force the chip into D0. */
12304         err = tg3_set_power_state(tp, PCI_D0);
12305         if (err) {
12306                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12307                        pci_name(tp->pdev));
12308                 return err;
12309         }
12310
12311         /* 5700 B0 chips do not support checksumming correctly due
12312          * to hardware bugs.
12313          */
12314         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12315                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12316
12317         /* Derive initial jumbo mode from MTU assigned in
12318          * ether_setup() via the alloc_etherdev() call
12319          */
12320         if (tp->dev->mtu > ETH_DATA_LEN &&
12321             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12322                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12323
12324         /* Determine WakeOnLan speed to use. */
12325         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12326             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12327             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12328             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12329                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12330         } else {
12331                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12332         }
12333
12334         /* A few boards don't want Ethernet@WireSpeed phy feature */
12335         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12336             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12337              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12338              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12339             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12340             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12341                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12342
12343         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12344             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12345                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12346         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12347                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12348
12349         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12350                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12351                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12352                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12353                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12354                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12355                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12356                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12357                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12358                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12359                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12360                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12361                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12362         }
12363
12364         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12365             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12366                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12367                 if (tp->phy_otp == 0)
12368                         tp->phy_otp = TG3_OTP_DEFAULT;
12369         }
12370
12371         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12372                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12373         else
12374                 tp->mi_mode = MAC_MI_MODE_BASE;
12375
12376         tp->coalesce_mode = 0;
12377         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12378             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12379                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12380
12381         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12382                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12383
12384         err = tg3_mdio_init(tp);
12385         if (err)
12386                 return err;
12387
12388         /* Initialize data/descriptor byte/word swapping. */
12389         val = tr32(GRC_MODE);
12390         val &= GRC_MODE_HOST_STACKUP;
12391         tw32(GRC_MODE, val | tp->grc_mode);
12392
12393         tg3_switch_clocks(tp);
12394
12395         /* Clear this out for sanity. */
12396         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12397
12398         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12399                               &pci_state_reg);
12400         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12401             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12402                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12403
12404                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12405                     chiprevid == CHIPREV_ID_5701_B0 ||
12406                     chiprevid == CHIPREV_ID_5701_B2 ||
12407                     chiprevid == CHIPREV_ID_5701_B5) {
12408                         void __iomem *sram_base;
12409
12410                         /* Write some dummy words into the SRAM status block
12411                          * area, see if it reads back correctly.  If the return
12412                          * value is bad, force enable the PCIX workaround.
12413                          */
12414                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12415
12416                         writel(0x00000000, sram_base);
12417                         writel(0x00000000, sram_base + 4);
12418                         writel(0xffffffff, sram_base + 4);
12419                         if (readl(sram_base) != 0x00000000)
12420                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12421                 }
12422         }
12423
12424         udelay(50);
12425         tg3_nvram_init(tp);
12426
12427         grc_misc_cfg = tr32(GRC_MISC_CFG);
12428         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12429
12430         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12431             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12432              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12433                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12434
12435         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12436             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12437                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12438         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12439                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12440                                       HOSTCC_MODE_CLRTICK_TXBD);
12441
12442                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12443                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12444                                        tp->misc_host_ctrl);
12445         }
12446
12447         /* Preserve the APE MAC_MODE bits */
12448         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12449                 tp->mac_mode = tr32(MAC_MODE) |
12450                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12451         else
12452                 tp->mac_mode = TG3_DEF_MAC_MODE;
12453
12454         /* these are limited to 10/100 only */
12455         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12456              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12457             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12458              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12459              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12460               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12461               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12462             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12463              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12464               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12465               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12466             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12467                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12468
12469         err = tg3_phy_probe(tp);
12470         if (err) {
12471                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12472                        pci_name(tp->pdev), err);
12473                 /* ... but do not return immediately ... */
12474                 tg3_mdio_fini(tp);
12475         }
12476
12477         tg3_read_partno(tp);
12478         tg3_read_fw_ver(tp);
12479
12480         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12481                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12482         } else {
12483                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12484                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12485                 else
12486                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12487         }
12488
12489         /* 5700 {AX,BX} chips have a broken status block link
12490          * change bit implementation, so we must use the
12491          * status register in those cases.
12492          */
12493         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12494                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12495         else
12496                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12497
12498         /* The led_ctrl is set during tg3_phy_probe, here we might
12499          * have to force the link status polling mechanism based
12500          * upon subsystem IDs.
12501          */
12502         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12503             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12504             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12505                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12506                                   TG3_FLAG_USE_LINKCHG_REG);
12507         }
12508
12509         /* For all SERDES we poll the MAC status register. */
12510         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12511                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12512         else
12513                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12514
12515         /* All chips before 5787 can get confused if TX buffers
12516          * straddle the 4GB address boundary in some cases.
12517          */
12518         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12519             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12520             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12522             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12523             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12524                 tp->dev->hard_start_xmit = tg3_start_xmit;
12525         else
12526                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12527
12528         tp->rx_offset = 2;
12529         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12530             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12531                 tp->rx_offset = 0;
12532
12533         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12534
12535         /* Increment the rx prod index on the rx std ring by at most
12536          * 8 for these chips to workaround hw errata.
12537          */
12538         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12539             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12540             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12541                 tp->rx_std_max_post = 8;
12542
12543         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12544                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12545                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12546
12547         return err;
12548 }
12549
#ifdef CONFIG_SPARC
/* On SPARC, prefer the MAC address recorded in the OpenFirmware
 * device-tree node of this PCI function.  Returns 0 if a 6-byte
 * "local-mac-address" property was found and copied, -ENODEV otherwise.
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        struct pci_dev *pdev = tp->pdev;
        struct device_node *dp = pci_device_to_OF_node(pdev);
        const unsigned char *addr;
        int len;

        addr = of_get_property(dp, "local-mac-address", &len);
        if (!addr || len != 6)
                return -ENODEV;

        memcpy(dev->dev_addr, addr, 6);
        memcpy(dev->perm_addr, dev->dev_addr, 6);
        return 0;
}

/* Last-resort SPARC fallback: adopt the system-wide IDPROM ethernet
 * address for both the active and the permanent address.  Cannot fail.
 */
static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;

        memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
        memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
        return 0;
}
#endif
12577
/* Determine the device MAC address, trying sources in decreasing order
 * of trust: OF device tree (SPARC only), the bootcode mailbox in NIC
 * SRAM, NVRAM, and finally the live MAC address registers.  On success
 * dev_addr and perm_addr are filled in and 0 is returned; -EINVAL if no
 * valid (non-zero, non-multicast) address could be found anywhere.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;

#ifdef CONFIG_SPARC
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
#endif

        /* Default NVRAM offset of the MAC address; dual-MAC parts keep
         * the second port's address at 0xcc instead.
         */
        mac_offset = 0x7c;
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                /* Make sure the NVRAM interface is sane before we read it
                 * below; reset the NVRAM command engine if the lock fails.
                 */
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        }
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mac_offset = 0x10;

        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        /* Bootcode stamps a 0x484b signature in the upper half of the
         * high mailbox word when it has deposited a MAC address there.
         */
        if ((hi >> 16) == 0x484b) {
                dev->dev_addr[0] = (hi >>  8) & 0xff;
                dev->dev_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
        if (!addr_ok) {
                /* Next, try NVRAM.  Note the byte lanes differ from the
                 * mailbox layout above.
                 */
                if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
                        dev->dev_addr[0] = ((hi >> 16) & 0xff);
                        dev->dev_addr[1] = ((hi >> 24) & 0xff);
                        dev->dev_addr[2] = ((lo >>  0) & 0xff);
                        dev->dev_addr[3] = ((lo >>  8) & 0xff);
                        dev->dev_addr[4] = ((lo >> 16) & 0xff);
                        dev->dev_addr[5] = ((lo >> 24) & 0xff);
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
                if (!tg3_get_default_macaddr_sparc(tp))
                        return 0;
#endif
                return -EINVAL;
        }
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
        return 0;
}
12652
/* Per-architecture burst-boundary goals used by tg3_calc_dma_bndry(). */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2

/* Fold DMA read/write burst-boundary settings into @val (the image of
 * the TG3PCI_DMA_RW_CTRL register) based on the bus type (PCI, PCI-X,
 * PCI Express), the PCI cache line size, and a per-architecture policy.
 * Returns the updated register image; @val is returned unchanged on
 * chips/configs where the boundary bits are a no-op or no policy applies.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
        int cacheline_size;
        u8 byte;
        int goal;

        /* PCI config space reports the cache line size in 32-bit words;
         * zero means "unset", which we treat as the 1024-byte maximum.
         */
        pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
        if (byte == 0)
                cacheline_size = 1024;
        else
                cacheline_size = (int) byte * 4;

        /* On 5703 and later chips, the boundary bits have no
         * effect.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
                goto out;

        /* Architecture-specific choice of how aggressively to break up
         * bursts; 0 means "leave the chip defaults alone".
         */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
        goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
        goal = BOUNDARY_SINGLE_CACHELINE;
#else
        goal = 0;
#endif
#endif

        if (!goal)
                goto out;

        /* PCI controllers on most RISC systems tend to disconnect
         * when a device tries to burst across a cache-line boundary.
         * Therefore, letting tg3 do so just wastes PCI bandwidth.
         *
         * Unfortunately, for PCI-E there are only limited
         * write-side controls for this, and thus for reads
         * we will still get the disconnects.  We'll also waste
         * these PCI cycles for both read and write for chips
         * other than 5700 and 5701 which do not implement the
         * boundary bits.
         */
        if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
                /* PCI-X: only 128- and 384-byte boundary encodings exist,
                 * so small cache lines are rounded up to the nearest one.
                 */
                switch (cacheline_size) {
                case 16:
                case 32:
                case 64:
                case 128:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
                                        DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
                        } else {
                                val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
                                        DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
                        }
                        break;

                case 256:
                        val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
                                DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
                        break;

                default:
                        val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
                                DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
                        break;
                }
        } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* PCI-E: only the write boundary is controllable. */
                switch (cacheline_size) {
                case 16:
                case 32:
                case 64:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
                                val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
                                break;
                        }
                        /* fallthrough */
                case 128:
                default:
                        val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
                        val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
                        break;
                }
        } else {
                /* Conventional PCI: full range of boundary encodings.
                 * For the multi-cacheline goal each case falls through
                 * until the 256-byte (or larger) encodings are reached.
                 */
                switch (cacheline_size) {
                case 16:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_16 |
                                        DMA_RWCTRL_WRITE_BNDRY_16);
                                break;
                        }
                        /* fallthrough */
                case 32:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_32 |
                                        DMA_RWCTRL_WRITE_BNDRY_32);
                                break;
                        }
                        /* fallthrough */
                case 64:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_64 |
                                        DMA_RWCTRL_WRITE_BNDRY_64);
                                break;
                        }
                        /* fallthrough */
                case 128:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_128 |
                                        DMA_RWCTRL_WRITE_BNDRY_128);
                                break;
                        }
                        /* fallthrough */
                case 256:
                        val |= (DMA_RWCTRL_READ_BNDRY_256 |
                                DMA_RWCTRL_WRITE_BNDRY_256);
                        break;
                case 512:
                        val |= (DMA_RWCTRL_READ_BNDRY_512 |
                                DMA_RWCTRL_WRITE_BNDRY_512);
                        break;
                case 1024:
                default:
                        val |= (DMA_RWCTRL_READ_BNDRY_1024 |
                                DMA_RWCTRL_WRITE_BNDRY_1024);
                        break;
                }
        }

out:
        return val;
}
12792
/* Run one host<->NIC test DMA of @size bytes between the coherent host
 * buffer at @buf_dma and NIC SRAM (mbuf 0x2100), driving the chip's
 * internal buffer-descriptor FTQ machinery directly.  @to_device
 * selects the direction: non-zero uses the read-DMA engine (host to
 * chip), zero the write-DMA engine (chip to host).  Returns 0 when the
 * DMA completes, -ENODEV if it does not finish within ~4ms of polling.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Quiesce the completion FIFOs and DMA engines before the test. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Build a descriptor pointing at the host buffer and at NIC
         * mbuf 0x2100 as the on-chip side of the transfer.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* cqid/sqid routing for the read-DMA engine. */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                /* cqid/sqid routing for the write-DMA engine. */
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor word-by-word into the NIC's SRAM
         * descriptor pool through the PCI config-space memory window.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick off the transfer by enqueuing the descriptor address. */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll the matching completion FIFO for our descriptor to show
         * up: 40 tries at 100us each, i.e. a ~4ms timeout.
         */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
12873
/* Size of the coherent host buffer used for the DMA loopback test. */
#define TEST_BUFFER_SIZE        0x2000

/* Choose safe DMA read/write control settings (boundaries, watermarks,
 * workaround bits) for this chip/bus combination, leave the result in
 * tp->dma_rwctrl and program it into TG3PCI_DMA_RW_CTRL.  Only
 * 5700/5701 run the actual host<->NIC DMA loopback test; later chips
 * just get fixed per-family values.  Returns 0 on success, -ENOMEM if
 * the test buffer cannot be allocated, or -ENODEV if the DMA test
 * fails even with the most conservative settings.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret;

        buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Start from the baseline PCI read/write command encodings,
         * then fold in the bus-specific burst boundaries.
         */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        /* Per-family watermark settings (raw register field values). */
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        /* 5703/5704: clear the low nibble (boundary bits are reused there). */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* Only 5700/5701 need the loopback test; everyone else is done. */
        ret = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        /* Write a known pattern to the chip and read it back; on
         * corruption, retry once with the conservative 16-byte write
         * boundary before declaring the device broken.
         */
        while (1) {
                u32 *p = buf, i;

                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                /* First corruption: fall back to the 16-byte
                                 * write boundary and rerun the whole test.
                                 */
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                /* Still corrupt with the safest setting. */
                                printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                static struct pci_device_id dma_wait_state_chipsets[] = {
                        { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
                                     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
                        { },
                };

                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                }
                else
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
13062
13063 static void __devinit tg3_init_link_config(struct tg3 *tp)
13064 {
13065         tp->link_config.advertising =
13066                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13067                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13068                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13069                  ADVERTISED_Autoneg | ADVERTISED_MII);
13070         tp->link_config.speed = SPEED_INVALID;
13071         tp->link_config.duplex = DUPLEX_INVALID;
13072         tp->link_config.autoneg = AUTONEG_ENABLE;
13073         tp->link_config.active_speed = SPEED_INVALID;
13074         tp->link_config.active_duplex = DUPLEX_INVALID;
13075         tp->link_config.phy_is_low_power = 0;
13076         tp->link_config.orig_speed = SPEED_INVALID;
13077         tp->link_config.orig_duplex = DUPLEX_INVALID;
13078         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13079 }
13080
13081 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13082 {
13083         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13084                 tp->bufmgr_config.mbuf_read_dma_low_water =
13085                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13086                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13087                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13088                 tp->bufmgr_config.mbuf_high_water =
13089                         DEFAULT_MB_HIGH_WATER_5705;
13090                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13091                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13092                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13093                         tp->bufmgr_config.mbuf_high_water =
13094                                 DEFAULT_MB_HIGH_WATER_5906;
13095                 }
13096
13097                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13098                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13099                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13100                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13101                 tp->bufmgr_config.mbuf_high_water_jumbo =
13102                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13103         } else {
13104                 tp->bufmgr_config.mbuf_read_dma_low_water =
13105                         DEFAULT_MB_RDMA_LOW_WATER;
13106                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13107                         DEFAULT_MB_MACRX_LOW_WATER;
13108                 tp->bufmgr_config.mbuf_high_water =
13109                         DEFAULT_MB_HIGH_WATER;
13110
13111                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13112                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13113                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13114                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13115                 tp->bufmgr_config.mbuf_high_water_jumbo =
13116                         DEFAULT_MB_HIGH_WATER_JUMBO;
13117         }
13118
13119         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13120         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13121 }
13122
13123 static char * __devinit tg3_phy_string(struct tg3 *tp)
13124 {
13125         switch (tp->phy_id & PHY_ID_MASK) {
13126         case PHY_ID_BCM5400:    return "5400";
13127         case PHY_ID_BCM5401:    return "5401";
13128         case PHY_ID_BCM5411:    return "5411";
13129         case PHY_ID_BCM5701:    return "5701";
13130         case PHY_ID_BCM5703:    return "5703";
13131         case PHY_ID_BCM5704:    return "5704";
13132         case PHY_ID_BCM5705:    return "5705";
13133         case PHY_ID_BCM5750:    return "5750";
13134         case PHY_ID_BCM5752:    return "5752";
13135         case PHY_ID_BCM5714:    return "5714";
13136         case PHY_ID_BCM5780:    return "5780";
13137         case PHY_ID_BCM5755:    return "5755";
13138         case PHY_ID_BCM5787:    return "5787";
13139         case PHY_ID_BCM5784:    return "5784";
13140         case PHY_ID_BCM5756:    return "5722/5756";
13141         case PHY_ID_BCM5906:    return "5906";
13142         case PHY_ID_BCM5761:    return "5761";
13143         case PHY_ID_BCM8002:    return "8002/serdes";
13144         case 0:                 return "serdes";
13145         default:                return "unknown";
13146         }
13147 }
13148
13149 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13150 {
13151         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13152                 strcpy(str, "PCI Express");
13153                 return str;
13154         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13155                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13156
13157                 strcpy(str, "PCIX:");
13158
13159                 if ((clock_ctrl == 7) ||
13160                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13161                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13162                         strcat(str, "133MHz");
13163                 else if (clock_ctrl == 0)
13164                         strcat(str, "33MHz");
13165                 else if (clock_ctrl == 2)
13166                         strcat(str, "50MHz");
13167                 else if (clock_ctrl == 4)
13168                         strcat(str, "66MHz");
13169                 else if (clock_ctrl == 6)
13170                         strcat(str, "100MHz");
13171         } else {
13172                 strcpy(str, "PCI:");
13173                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13174                         strcat(str, "66MHz");
13175                 else
13176                         strcat(str, "33MHz");
13177         }
13178         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13179                 strcat(str, ":32-bit");
13180         else
13181                 strcat(str, ":64-bit");
13182         return str;
13183 }
13184
/* Locate the sibling PCI function of a dual-port chip (e.g. 5704):
 * scan all eight functions in our device's slot for a device other
 * than ourselves.  Returns tp->pdev itself when the chip is running
 * in single-port mode and no peer function exists.
 *
 * The returned pci_dev is deliberately NOT reference-elevated; see
 * the comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* pci_dev_put(NULL) is a no-op, so absent functions
		 * are handled without a check.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	/* NOTE(review): if the loop runs to completion with func==7
	 * matching tp->pdev itself, peer is non-NULL here and the put
	 * below would drop one extra reference on our own pdev.  In
	 * practice the device's own function is always visited before
	 * func 7 exhausts the scan, but worth confirming.
	 */
	pci_dev_put(peer);

	return peer;
}
13212
13213 static void __devinit tg3_init_coal(struct tg3 *tp)
13214 {
13215         struct ethtool_coalesce *ec = &tp->coal;
13216
13217         memset(ec, 0, sizeof(*ec));
13218         ec->cmd = ETHTOOL_GCOALESCE;
13219         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13220         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13221         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13222         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13223         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13224         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13225         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13226         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13227         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13228
13229         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13230                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13231                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13232                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13233                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13234                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13235         }
13236
13237         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13238                 ec->rx_coalesce_usecs_irq = 0;
13239                 ec->tx_coalesce_usecs_irq = 0;
13240                 ec->stats_block_coalesce_usecs = 0;
13241         }
13242 }
13243
/* PCI probe routine: bring up one tg3 device.
 *
 * Order matters throughout — PCI enable, BAR mapping, invariant
 * discovery, DMA-mask negotiation, optional APE mapping, chip reset,
 * DMA self-test, and finally netdev registration.  Every failure
 * jumps to the unwind label matching the last successful step.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	resource_size_t tg3reg_base;
	unsigned long tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only on the first probe. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	/* Allocate the netdev with struct tg3 as its private area. */
	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	/* Module parameter overrides the default message level. */
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map the BAR 0 register window. */
	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up net_device callbacks (pre-net_device_ops style). */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Determine chip revision, flags, PHY setup, etc. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware-TSO chips always get it;
	 * certain older chips and ASF configurations must not use it;
	 * the rest can do firmware TSO but carry the TSO_BUG flag.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 on a slow bus without TSO gets a smaller RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Map the APE register window from BAR 2 on chips that have one. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %s\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       print_mac(mac, dev->dev_addr));

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwind: each label undoes the steps completed before
	 * the point that jumped to it, in reverse order.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
13589
13590 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13591 {
13592         struct net_device *dev = pci_get_drvdata(pdev);
13593
13594         if (dev) {
13595                 struct tg3 *tp = netdev_priv(dev);
13596
13597                 flush_scheduled_work();
13598
13599                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13600                         tg3_phy_fini(tp);
13601                         tg3_mdio_fini(tp);
13602                 }
13603
13604                 unregister_netdev(dev);
13605                 if (tp->aperegs) {
13606                         iounmap(tp->aperegs);
13607                         tp->aperegs = NULL;
13608                 }
13609                 if (tp->regs) {
13610                         iounmap(tp->regs);
13611                         tp->regs = NULL;
13612                 }
13613                 free_netdev(dev);
13614                 pci_release_regions(pdev);
13615                 pci_disable_device(pdev);
13616                 pci_set_drvdata(pdev, NULL);
13617         }
13618 }
13619
/* Legacy PCI suspend hook: save PCI config state, quiesce the
 * interface if it is running, halt the chip, and drop it into the
 * platform's target low-power state.  If entering low power fails,
 * the hardware is restarted so the system remains usable.
 *
 * Returns 0 on success or the error from tg3_set_power_state().
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, the PHY, and the data path before
	 * touching chip power state.
	 */
	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	/* Without a PM capability, fall back to D3hot. */
	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		/* Power transition failed: bring the hardware and the
		 * interface back up before returning the error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
13681
/* Legacy PCI resume hook: restore PCI config space, power the chip
 * back to D0, and — if the interface was running at suspend time —
 * restart the hardware, the periodic timer, and the PHY.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	/* Interface was down at suspend: nothing to rebuild. */
	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer stopped by tg3_suspend(). */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
13719
/* PCI driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
13728
/* Module load: register the PCI driver; probing happens per device. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

/* Module unload: unregister, invoking tg3_remove_one() per device. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);