]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/net/tg3.c
tg3: Preserve DASH connectivity when WOL enabled
[net-next-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #define BAR_0   0
58 #define BAR_2   2
59
60 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61 #define TG3_VLAN_TAG_USED 1
62 #else
63 #define TG3_VLAN_TAG_USED 0
64 #endif
65
66 #define TG3_TSO_SUPPORT 1
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.94"
73 #define DRV_MODULE_RELDATE      "August 14, 2008"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128
129 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
131
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
134
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
138 #define TG3_NUM_TEST            6
139
140 static char version[] __devinitdata =
141         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
142
143 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
144 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
145 MODULE_LICENSE("GPL");
146 MODULE_VERSION(DRV_MODULE_VERSION);
147
148 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
149 module_param(tg3_debug, int, 0);
150 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
151
152 static struct pci_device_id tg3_pci_tbl[] = {
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
209         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
210         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
211         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
212         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
213         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
214         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
215         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
216         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
217         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
218         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
219         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
220         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
221         {}
222 };
223
224 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
225
226 static const struct {
227         const char string[ETH_GSTRING_LEN];
228 } ethtool_stats_keys[TG3_NUM_STATS] = {
229         { "rx_octets" },
230         { "rx_fragments" },
231         { "rx_ucast_packets" },
232         { "rx_mcast_packets" },
233         { "rx_bcast_packets" },
234         { "rx_fcs_errors" },
235         { "rx_align_errors" },
236         { "rx_xon_pause_rcvd" },
237         { "rx_xoff_pause_rcvd" },
238         { "rx_mac_ctrl_rcvd" },
239         { "rx_xoff_entered" },
240         { "rx_frame_too_long_errors" },
241         { "rx_jabbers" },
242         { "rx_undersize_packets" },
243         { "rx_in_length_errors" },
244         { "rx_out_length_errors" },
245         { "rx_64_or_less_octet_packets" },
246         { "rx_65_to_127_octet_packets" },
247         { "rx_128_to_255_octet_packets" },
248         { "rx_256_to_511_octet_packets" },
249         { "rx_512_to_1023_octet_packets" },
250         { "rx_1024_to_1522_octet_packets" },
251         { "rx_1523_to_2047_octet_packets" },
252         { "rx_2048_to_4095_octet_packets" },
253         { "rx_4096_to_8191_octet_packets" },
254         { "rx_8192_to_9022_octet_packets" },
255
256         { "tx_octets" },
257         { "tx_collisions" },
258
259         { "tx_xon_sent" },
260         { "tx_xoff_sent" },
261         { "tx_flow_control" },
262         { "tx_mac_errors" },
263         { "tx_single_collisions" },
264         { "tx_mult_collisions" },
265         { "tx_deferred" },
266         { "tx_excessive_collisions" },
267         { "tx_late_collisions" },
268         { "tx_collide_2times" },
269         { "tx_collide_3times" },
270         { "tx_collide_4times" },
271         { "tx_collide_5times" },
272         { "tx_collide_6times" },
273         { "tx_collide_7times" },
274         { "tx_collide_8times" },
275         { "tx_collide_9times" },
276         { "tx_collide_10times" },
277         { "tx_collide_11times" },
278         { "tx_collide_12times" },
279         { "tx_collide_13times" },
280         { "tx_collide_14times" },
281         { "tx_collide_15times" },
282         { "tx_ucast_packets" },
283         { "tx_mcast_packets" },
284         { "tx_bcast_packets" },
285         { "tx_carrier_sense_errors" },
286         { "tx_discards" },
287         { "tx_errors" },
288
289         { "dma_writeq_full" },
290         { "dma_write_prioq_full" },
291         { "rxbds_empty" },
292         { "rx_discards" },
293         { "rx_errors" },
294         { "rx_threshold_hit" },
295
296         { "dma_readq_full" },
297         { "dma_read_prioq_full" },
298         { "tx_comp_queue_full" },
299
300         { "ring_set_send_prod_index" },
301         { "ring_status_update" },
302         { "nic_irqs" },
303         { "nic_avoided_irqs" },
304         { "nic_tx_threshold_hit" }
305 };
306
/* Human-readable self-test names (TG3_NUM_TEST entries).  The string
 * contents are user-visible ethtool output — do not reword them.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
317
318 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
319 {
320         writel(val, tp->regs + off);
321 }
322
323 static u32 tg3_read32(struct tg3 *tp, u32 off)
324 {
325         return (readl(tp->regs + off));
326 }
327
328 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
329 {
330         writel(val, tp->aperegs + off);
331 }
332
333 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
334 {
335         return (readl(tp->aperegs + off));
336 }
337
/* Write a chip register indirectly through PCI config space.  The
 * REG_BASE_ADDR/REG_DATA config-space window is a shared resource, so
 * the address+data pair must be issued atomically under indirect_lock.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
347
348 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
349 {
350         writel(val, tp->regs + off);
351         readl(tp->regs + off);
352 }
353
/* Read a chip register indirectly through the PCI config-space
 * REG_BASE_ADDR/REG_DATA window; counterpart of
 * tg3_write_indirect_reg32().  Serialized by indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
365
/* Mailbox write for indirect (config-space) access mode.  The RX
 * return-ring and standard-ring producer mailboxes have dedicated
 * config-space registers; all other mailboxes are reached through the
 * register window at their MMIO offset + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* Fast paths: these two mailboxes are directly addressable in
         * config space and need no window (or lock).
         */
        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
395
/* Mailbox read for indirect access mode: mailboxes sit at their MMIO
 * offset + 0x5600 behind the config-space register window.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
407
408 /* usec_wait specifies the wait time in usec when writing to certain registers
409  * where it is unsafe to read back the register without some delay.
410  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
411  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
412  */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                /* read back flushes the posted write to the device */
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
432
/* Mailbox write followed by a read-back to flush it, unless the chip
 * has the mailbox write-reorder bug or the ICH workaround is active,
 * in which case the read-back is skipped.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
440
/* TX mailbox write with chip-bug workarounds: the value is written a
 * second time on TXD_MBOX_HWBUG parts, and read back on
 * MBOX_WRITE_REORDER parts to force the write to complete in order.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
450
451 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
452 {
453         return (readl(tp->regs + off + GRCMBOX_BASE));
454 }
455
456 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
457 {
458         writel(val, tp->regs + off + GRCMBOX_BASE);
459 }
460
461 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
462 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
463 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
464 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
465 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
466
467 #define tw32(reg,val)           tp->write32(tp, reg, val)
468 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
469 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
470 #define tr32(reg)               tp->read32(tp, reg)
471
/* Write @val into NIC on-chip SRAM at @off through the memory window,
 * using PCI config space when TG3_FLAG_SRAM_USE_CONFIG is set, MMIO
 * otherwise.  The window is shared state, hence indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* 5906: writes to [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC)
         * are silently dropped (range apparently not backed on this
         * chip — NOTE(review): confirm).
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
496
/* Read a 32-bit word of NIC on-chip SRAM at @off into *val through the
 * memory window; counterpart of tg3_write_mem().  Serialized by
 * indirect_lock.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        /* 5906: the stats-block SRAM range cannot be read; report 0. */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
523
524 static void tg3_ape_lock_init(struct tg3 *tp)
525 {
526         int i;
527
528         /* Make sure the driver hasn't any stale locks. */
529         for (i = 0; i < 8; i++)
530                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
531                                 APE_LOCK_GRANT_DRIVER);
532 }
533
534 static int tg3_ape_lock(struct tg3 *tp, int locknum)
535 {
536         int i, off;
537         int ret = 0;
538         u32 status;
539
540         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
541                 return 0;
542
543         switch (locknum) {
544                 case TG3_APE_LOCK_GRC:
545                 case TG3_APE_LOCK_MEM:
546                         break;
547                 default:
548                         return -EINVAL;
549         }
550
551         off = 4 * locknum;
552
553         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
554
555         /* Wait for up to 1 millisecond to acquire lock. */
556         for (i = 0; i < 100; i++) {
557                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
558                 if (status == APE_LOCK_GRANT_DRIVER)
559                         break;
560                 udelay(10);
561         }
562
563         if (status != APE_LOCK_GRANT_DRIVER) {
564                 /* Revoke the lock request. */
565                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
566                                 APE_LOCK_GRANT_DRIVER);
567
568                 ret = -EBUSY;
569         }
570
571         return ret;
572 }
573
574 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
575 {
576         int off;
577
578         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
579                 return;
580
581         switch (locknum) {
582                 case TG3_APE_LOCK_GRC:
583                 case TG3_APE_LOCK_MEM:
584                         break;
585                 default:
586                         return;
587         }
588
589         off = 4 * locknum;
590         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
591 }
592
/* Mask chip interrupts: set the PCI INT mask bit in misc host control,
 * then write 1 to interrupt mailbox 0 (flushed write).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
599
/* Force an interrupt if one might be pending.  Without tagged status,
 * a pending status-block update is signalled via GRC SETINT; otherwise
 * the coalescing engine is kicked with HOSTCC_MODE_NOW.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
609
/* Unmask chip interrupts: clear the PCI INT mask, acknowledge via the
 * interrupt mailbox with the last status tag, then fire a forced
 * interrupt if work is already pending (tg3_cond_int).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();          /* irq_sync must be visible before ints are unmasked */

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        /* 1-shot MSI parts get a second mailbox write.  NOTE(review):
         * presumably needed to re-arm after the one-shot auto-mask —
         * confirm against chip docs.
         */
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
624
625 static inline unsigned int tg3_has_work(struct tg3 *tp)
626 {
627         struct tg3_hw_status *sblk = tp->hw_status;
628         unsigned int work_exists = 0;
629
630         /* check for phy events */
631         if (!(tp->tg3_flags &
632               (TG3_FLAG_USE_LINKCHG_REG |
633                TG3_FLAG_POLL_SERDES))) {
634                 if (sblk->status & SD_STATUS_LINK_CHG)
635                         work_exists = 1;
636         }
637         /* check for RX/TX work to do */
638         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
639             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
640                 work_exists = 1;
641
642         return work_exists;
643 }
644
645 /* tg3_restart_ints
646  *  similar to tg3_enable_ints, but it accurately determines whether there
647  *  is new work pending and can return without flushing the PIO write
648  *  which reenables interrupts
649  */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();       /* order the mailbox write before later MMIO */

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
665
/* Quiesce the interface: stop NAPI polling and disable the TX queue.
 * trans_start is refreshed first so the stopped queue does not trip
 * the TX watchdog.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
672
/* Restart the interface after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated, and unmask chip
 * interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
684
/* Reprogram TG3PCI_CLOCK_CTRL, preserving only the CLKRUN bits and the
 * low 5 bits.  No-op on CPMU-equipped and 5780-class chips.
 * NOTE(review): the staged ALTCLK writes appear to be a required
 * hardware sequencing when dropping the 625/44MHz core clock bits —
 * the order and 40us waits must be preserved.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
716
717 #define PHY_BUSY_LOOPS  5000
718
/* Read PHY register @reg over the MAC's MI (MDIO) interface.
 * Returns 0 with the value in *val on success, or -EBUSY if MI_COM
 * stays busy for PHY_BUSY_LOOPS polls.  Hardware auto-polling is
 * paused for the duration and restored afterwards.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        /* Pause auto-polling so it cannot contend for MI_COM. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI frame: PHY address, register number, read cmd. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the BUSY bit clears, then re-read for the data. */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore auto-polling if we disabled it above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
767
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 * Returns 0 on success, -EBUSY on MI_COM timeout.  On the 5906,
 * writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently skipped
 * and reported as success.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        /* Pause auto-polling so it cannot contend for MI_COM. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI frame: PHY address, register, data, write cmd. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the BUSY bit clears. */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore auto-polling if we disabled it above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
816
817 static int tg3_bmcr_reset(struct tg3 *tp)
818 {
819         u32 phy_control;
820         int limit, err;
821
822         /* OK, reset it, and poll the BMCR_RESET bit until it
823          * clears or we time out.
824          */
825         phy_control = BMCR_RESET;
826         err = tg3_writephy(tp, MII_BMCR, phy_control);
827         if (err != 0)
828                 return -EBUSY;
829
830         limit = 5000;
831         while (limit--) {
832                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
833                 if (err != 0)
834                         return -EBUSY;
835
836                 if ((phy_control & BMCR_RESET) == 0) {
837                         udelay(40);
838                         break;
839                 }
840                 udelay(10);
841         }
842         if (limit <= 0)
843                 return -EBUSY;
844
845         return 0;
846 }
847
848 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
849 {
850         struct tg3 *tp = (struct tg3 *)bp->priv;
851         u32 val;
852
853         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
854                 return -EAGAIN;
855
856         if (tg3_readphy(tp, reg, &val))
857                 return -EIO;
858
859         return val;
860 }
861
862 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
863 {
864         struct tg3 *tp = (struct tg3 *)bp->priv;
865
866         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
867                 return -EAGAIN;
868
869         if (tg3_writephy(tp, reg, val))
870                 return -EIO;
871
872         return 0;
873 }
874
/* mii_bus reset hook: intentionally a no-op; always reports success. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
879
/* Program the MAC-side RGMII configuration registers to match the
 * in-band signalling flags in tp->tg3_flags3.  No-op unless the
 * attached PHY is in RGMII mode.
 */
static void tg3_mdio_config(struct tg3 *tp)
{
	u32 val;

	if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
	    PHY_INTERFACE_MODE_RGMII)
		return;

	/* MAC_PHYCFG1: clear the external RX decode / send-status bits,
	 * then set them only when standard in-band signalling is disabled
	 * and the corresponding external in-band flag is enabled. */
	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	/* In-band status is enabled unless explicitly disabled. */
	val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_INBAND_ENABLE;
	tw32(MAC_PHYCFG2, val);

	/* MAC_EXT_RGMII_MODE: mirror the same external in-band RX/TX
	 * selections into the extended RGMII mode register. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
924
/* Resume MDIO bus access: clear the PAUSED flag (under the mdio lock so
 * in-flight tg3_mdio_read/write callers see a consistent state), turn
 * off MAC auto-polling, and reapply the RGMII configuration if the bus
 * has been initialized.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	/* Disable hardware auto-polling before touching the PHY. */
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
		tg3_mdio_config(tp);
}
940
/* Pause MDIO bus access: set the PAUSED flag under the mdio lock so
 * tg3_mdio_read/write return -EAGAIN until tg3_mdio_start() runs.
 * No-op if the bus was never initialized.
 */
static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}
949
950 static int tg3_mdio_init(struct tg3 *tp)
951 {
952         int i;
953         u32 reg;
954         struct phy_device *phydev;
955
956         tg3_mdio_start(tp);
957
958         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
959             (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
960                 return 0;
961
962         tp->mdio_bus = mdiobus_alloc();
963         if (tp->mdio_bus == NULL)
964                 return -ENOMEM;
965
966         tp->mdio_bus->name     = "tg3 mdio bus";
967         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
968                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
969         tp->mdio_bus->priv     = tp;
970         tp->mdio_bus->parent   = &tp->pdev->dev;
971         tp->mdio_bus->read     = &tg3_mdio_read;
972         tp->mdio_bus->write    = &tg3_mdio_write;
973         tp->mdio_bus->reset    = &tg3_mdio_reset;
974         tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
975         tp->mdio_bus->irq      = &tp->mdio_irq[0];
976
977         for (i = 0; i < PHY_MAX_ADDR; i++)
978                 tp->mdio_bus->irq[i] = PHY_POLL;
979
980         /* The bus registration will look for all the PHYs on the mdio bus.
981          * Unfortunately, it does not ensure the PHY is powered up before
982          * accessing the PHY ID registers.  A chip reset is the
983          * quickest way to bring the device back to an operational state..
984          */
985         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
986                 tg3_bmcr_reset(tp);
987
988         i = mdiobus_register(tp->mdio_bus);
989         if (i) {
990                 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
991                         tp->dev->name, i);
992                 return i;
993         }
994
995         tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
996
997         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
998
999         switch (phydev->phy_id) {
1000         case TG3_PHY_ID_BCM50610:
1001                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1002                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1003                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1004                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1005                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1006                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1007                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1008                 break;
1009         case TG3_PHY_ID_BCMAC131:
1010                 phydev->interface = PHY_INTERFACE_MODE_MII;
1011                 break;
1012         }
1013
1014         tg3_mdio_config(tp);
1015
1016         return 0;
1017 }
1018
1019 static void tg3_mdio_fini(struct tg3 *tp)
1020 {
1021         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1022                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1023                 mdiobus_unregister(tp->mdio_bus);
1024                 mdiobus_free(tp->mdio_bus);
1025                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1026         }
1027 }
1028
1029 /* tp->lock is held. */
1030 static inline void tg3_generate_fw_event(struct tg3 *tp)
1031 {
1032         u32 val;
1033
1034         val = tr32(GRC_RX_CPU_EVENT);
1035         val |= GRC_RX_CPU_DRIVER_EVENT;
1036         tw32_f(GRC_RX_CPU_EVENT, val);
1037
1038         tp->last_event_jiffies = jiffies;
1039 }
1040
1041 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1042
/* tp->lock is held.
 *
 * Wait for the firmware to acknowledge (clear) the driver-event bit
 * raised by a previous tg3_generate_fw_event(), polling for at most
 * TG3_FW_EVENT_TIMEOUT_USEC measured from when the event was raised.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8 us steps; +1 so we always poll at least once. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		/* Bit cleared means the firmware has consumed the event. */
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1069
/* tp->lock is held.
 *
 * Report the current link state to the management firmware (5780-class
 * chips with ASF enabled only) by writing a 14-byte snapshot of the PHY
 * registers into the firmware command mailbox and firing a driver event.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	/* Make sure any previous event has been consumed first. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* Payload length: 14 bytes of PHY register data follow. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half.
	 * A failed read leaves that half zero. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement and link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status (copper only). */
	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register in the high half. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1117
1118 static void tg3_link_report(struct tg3 *tp)
1119 {
1120         if (!netif_carrier_ok(tp->dev)) {
1121                 if (netif_msg_link(tp))
1122                         printk(KERN_INFO PFX "%s: Link is down.\n",
1123                                tp->dev->name);
1124                 tg3_ump_link_report(tp);
1125         } else if (netif_msg_link(tp)) {
1126                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1127                        tp->dev->name,
1128                        (tp->link_config.active_speed == SPEED_1000 ?
1129                         1000 :
1130                         (tp->link_config.active_speed == SPEED_100 ?
1131                          100 : 10)),
1132                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1133                         "full" : "half"));
1134
1135                 printk(KERN_INFO PFX
1136                        "%s: Flow control is %s for TX and %s for RX.\n",
1137                        tp->dev->name,
1138                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1139                        "on" : "off",
1140                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1141                        "on" : "off");
1142                 tg3_ump_link_report(tp);
1143         }
1144 }
1145
1146 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1147 {
1148         u16 miireg;
1149
1150         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1151                 miireg = ADVERTISE_PAUSE_CAP;
1152         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1153                 miireg = ADVERTISE_PAUSE_ASYM;
1154         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1155                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1156         else
1157                 miireg = 0;
1158
1159         return miireg;
1160 }
1161
1162 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1163 {
1164         u16 miireg;
1165
1166         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1167                 miireg = ADVERTISE_1000XPAUSE;
1168         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1169                 miireg = ADVERTISE_1000XPSE_ASYM;
1170         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1171                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1172         else
1173                 miireg = 0;
1174
1175         return miireg;
1176 }
1177
1178 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1179 {
1180         u8 cap = 0;
1181
1182         if (lcladv & ADVERTISE_PAUSE_CAP) {
1183                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1184                         if (rmtadv & LPA_PAUSE_CAP)
1185                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1186                         else if (rmtadv & LPA_PAUSE_ASYM)
1187                                 cap = TG3_FLOW_CTRL_RX;
1188                 } else {
1189                         if (rmtadv & LPA_PAUSE_CAP)
1190                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1191                 }
1192         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1193                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1194                         cap = TG3_FLOW_CTRL_TX;
1195         }
1196
1197         return cap;
1198 }
1199
1200 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1201 {
1202         u8 cap = 0;
1203
1204         if (lcladv & ADVERTISE_1000XPAUSE) {
1205                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1206                         if (rmtadv & LPA_1000XPAUSE)
1207                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1208                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1209                                 cap = TG3_FLOW_CTRL_RX;
1210                 } else {
1211                         if (rmtadv & LPA_1000XPAUSE)
1212                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1213                 }
1214         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1215                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1216                         cap = TG3_FLOW_CTRL_TX;
1217         }
1218
1219         return cap;
1220 }
1221
/* Apply the resolved flow-control settings to the MAC RX/TX mode
 * registers.  @lcladv/@rmtadv are the local and remote pause
 * advertisements used when autoneg governs flow control; otherwise the
 * statically configured tp->link_config.flowctrl is used.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, autoneg state lives in the phy_device. */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the hardware when the mode actually changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1261
/* phylib link-change callback: sync MAC mode, TX timing and flow
 * control with the PHY's reported speed/duplex/pause state, then log a
 * link message if anything user-visible changed.  Runs under tp->lock.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* MII port mode for 10/100, GMII for gigabit. */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our configured
			 * flow control and the partner's pause bits. */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	/* 1000/half needs a longer slot time than the default. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Emit a link message only when something observable changed. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
	    linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	/* Report outside the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1333
/* Connect the MAC to its PHY through phylib.  Resets the PHY to a
 * known state, attaches tg3_adjust_link() as the link-change handler,
 * and masks the PHY's capabilities down to what this MAC supports.
 *
 * Returns 0 on success (or if already connected), or the phy_connect()
 * error.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	/* Mask with MAC supported features. */
	phydev->supported &= (PHY_GBIT_FEATURES |
			      SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause);

	phydev->advertising = phydev->supported;

	printk(KERN_INFO
	       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
	       tp->dev->name, phydev->drv->name, phydev->dev.bus_id);

	return 0;
}
1369
/* Start (or resume) the PHY state machine.  If the PHY was put into a
 * low-power state, first restore the speed/duplex/autoneg settings
 * saved at suspend time, then kick off autonegotiation.  No-op if the
 * PHY is not connected.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		/* Restore the configuration saved before power-down. */
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1391
1392 static void tg3_phy_stop(struct tg3 *tp)
1393 {
1394         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1395                 return;
1396
1397         phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1398 }
1399
1400 static void tg3_phy_fini(struct tg3 *tp)
1401 {
1402         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1403                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1404                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1405         }
1406 }
1407
/* Write @val to PHY DSP register @reg via the indirect address/data
 * register pair. */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1413
/* Enable or disable automatic MDI crossover (auto-MDIX) on the PHY.
 * Only applies to 5705-plus copper devices; the 5906's internal PHY
 * uses a shadowed test-register bit, others use an aux-control shadow
 * register.  @enable is treated as a boolean.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		/* Expose the shadow registers, flip the MDIX bit in the
		 * misc-control shadow, then restore the test register. */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Select the misc shadow of the aux-control register,
		 * read-modify-write the force-AMDIX bit, and write it
		 * back with the write-enable bit set. */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
1451
/* Enable the PHY's "ethernet wirespeed" downshift feature unless the
 * chip flags say it is unsupported.  The 0x7007 value and bits 15/4
 * select and set the relevant aux-control shadow — presumably per the
 * Broadcom PHY datasheet; TODO confirm the bit meanings. */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	/* Read-modify-write the aux-control shadow register. */
	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
1464
/* Apply per-device PHY tuning values read from one-time-programmable
 * (OTP) memory to the PHY DSP registers.  No-op if no OTP data was
 * found.  Each field is extracted from tp->phy_otp with its mask/shift
 * and written to the corresponding DSP register.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	/* AGC target, with the default bits always set. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter tuning. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable plus ADC clock adjust. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	/* VDAC trim. */
	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	/* 10BASE-T amplitude trim. */
	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	/* Resistor offset trims. */
	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1507
1508 static int tg3_wait_macro_done(struct tg3 *tp)
1509 {
1510         int limit = 100;
1511
1512         while (limit--) {
1513                 u32 tmp32;
1514
1515                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1516                         if ((tmp32 & 0x1000) == 0)
1517                                 break;
1518                 }
1519         }
1520         if (limit <= 0)
1521                 return -EBUSY;
1522
1523         return 0;
1524 }
1525
/* Write a known test pattern into each of the four PHY DSP channels and
 * read it back to verify the channel is healthy.  On any macro timeout
 * or data mismatch, sets *resetp so the caller retries after another
 * PHY reset, and returns -EBUSY.  Returns 0 when all channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel test pattern: three (low, high) word pairs each. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel block, then start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Execute the write macro and wait for completion. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and run the read-back macros. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back the three word pairs and compare with the
		 * pattern (low word masked to 15 bits, high to 4). */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the recovery sequence
				 * into DSP register 0x000b and fail
				 * without requesting another reset. */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1591
/* Clear the test pattern from all four PHY DSP channels by writing
 * zeros through the same write-macro sequence used by
 * tg3_phy_write_and_check_testpat().  Returns 0 on success, -EBUSY if
 * a macro times out.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		/* Execute the macro and wait for completion. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
1611
/* Extended PHY reset workaround for 5703/5704/5705-class parts: reset
 * the PHY, force 1000/full master mode, then write and verify a DSP
 * test pattern (retrying with fresh resets up to 10 times) before
 * restoring the original register state.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		/* On pattern failure the helper sets do_phy_reset, so the
		 * next iteration starts with a fresh PHY reset. */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* NOTE(review): if the MII_TG3_CTRL read above failed on every
	 * retry, phy9_orig is used uninitialized here — confirm whether
	 * the retry loop guarantees at least one successful read. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1687
/* Reset the tigon3 PHY and re-apply all chip-specific PHY fixups
 * (DSP workarounds, CPMU clock settings, jumbo-frame bits, FIFO
 * elasticity).  Drops and reports the carrier if the interface was
 * up.  (An older comment mentioned a FORCE argument; the function
 * takes none — the reset is unconditional.)
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 cpmuctrl;
        u32 phy_status;
        int err;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                /* Bring the 5906 internal PHY out of IDDQ (deep
                 * power-down) before touching it. */
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* BMSR is read twice — presumably because link status is a
         * latched bit and the first read can be stale; verify against
         * the MII spec if changing. */
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        /* The reset will drop the link; report it now if it was up. */
        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        /* 5703/5704/5705 need the full DSP test-pattern sequence
         * instead of a plain BMCR reset. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        /* On 5784 (non-AX), temporarily clear GPHY_10MB_RXONLY around
         * the reset; it is restored below. */
        cpmuctrl = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                u32 phy;

                phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

                /* Restore the CPMU control value saved above. */
                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                u32 val;

                /* Undo the 12.5MHz MAC clock selection made when the
                 * PHY was powered down. */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }

                /* Disable GPHY autopowerdown. */
                tg3_writephy(tp, MII_TG3_MISC_SHDW,
                             MII_TG3_MISC_SHDW_WREN |
                             MII_TG3_MISC_SHDW_APD_SEL |
                             MII_TG3_MISC_SHDW_APD_WKTM_84MS);
        }

        tg3_phy_apply_otp(tp);

out:
        /* Erratum workarounds: the DSP values below are opaque
         * Broadcom-prescribed constants for the respective PHY bugs. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                /* The double write is intentional (5704 A0 erratum). */
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
                } else
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
1833
/* Drive the GRC local-control GPIOs that switch the card onto or off
 * of auxiliary (Vaux) power.  Aux power is kept on when this port or
 * its peer needs it for WOL or ASF; otherwise it is switched off.
 * On dual-port 5704/5714 devices the GPIOs are shared, so the peer
 * port's state is consulted first.  No-op for LOM (non-NIC) designs.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        /* Only real NICs have the Vaux switching circuitry. */
        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        /* Either port needing WOL or ASF keeps aux power enabled. */
        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
                        /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                        u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                             GRC_LCLCTRL_GPIO_OE1 |
                                             GRC_LCLCTRL_GPIO_OE2 |
                                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                                             GRC_LCLCTRL_GPIO_OUTPUT1 |
                                             tp->grc_local_ctrl;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        /* If the peer is already fully initialized, it
                         * owns the GPIOs — leave them alone. */
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        /* Three-step staged GPIO sequence; each write
                         * settles for 100us via tw32_wait_f. */
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                /* Neither port needs aux power: switch it off (not
                 * applicable to 5700/5701). */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}
1944
1945 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1946 {
1947         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1948                 return 1;
1949         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1950                 if (speed != SPEED_10)
1951                         return 1;
1952         } else if (speed == SPEED_10)
1953                 return 1;
1954
1955         return 0;
1956 }
1957
1958 static int tg3_setup_phy(struct tg3 *, int);
1959
1960 #define RESET_KIND_SHUTDOWN     0
1961 #define RESET_KIND_INIT         1
1962 #define RESET_KIND_SUSPEND      2
1963
1964 static void tg3_write_sig_post_reset(struct tg3 *, int);
1965 static int tg3_halt_cpu(struct tg3 *, u32);
1966 static int tg3_nvram_lock(struct tg3 *);
1967 static void tg3_nvram_unlock(struct tg3 *);
1968
/* Power down the PHY as far as the hardware errata allow, prior to
 * suspending the chip.  Serdes parts only quiesce the SG_DIG block;
 * 5906 uses IDDQ mode; chips with PHY power-down bugs are left
 * powered; everything else gets BMCR_PDOWN.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        /* Hold the serdes autoneg block in reset. */
                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* 5906: reset the PHY, then drop it into IDDQ (deep
                 * power-down) instead of BMCR_PDOWN. */
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
                /* Force the LEDs off before powering down. */
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
                return;

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                /* Drop the 1000Mb MAC clock to 12.5MHz while the PHY is
                 * down; tg3_phy_reset() undoes this on resume. */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2016
/* Transition the device to the requested PCI power state.
 *
 * D0 restores full power and switches the NIC off Vaux.  D1/D2/D3hot
 * run the full suspend sequence: mask interrupts, park the link in a
 * low-power mode, arm WOL (signature mailbox + MAC magic-packet
 * enable), gate clocks, power down the PHY unless WOL/ASF/APE still
 * needs it, switch aux power, and finally enter the PCI state.
 *
 * Returns 0 on success, -EINVAL for an unrecognized power state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
        u32 misc_host_ctrl;

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        switch (state) {
        case PCI_D0:
                /* Resume path: disable wake, restore full power. */
                pci_enable_wake(tp->pdev, state, false);
                pci_set_power_state(tp->pdev, PCI_D0);

                /* Switch out of Vaux if it is a NIC */
                if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

                return 0;

        case PCI_D1:
        case PCI_D2:
        case PCI_D3hot:
                break;

        default:
                printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
                        tp->dev->name, state);
                return -EINVAL;
        }
        /* Everything below is the suspend (D1/D2/D3hot) path. */
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        /* Park the link in a low-power configuration, saving the
         * current settings for restore on resume. */
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
                    !tp->link_config.phy_is_low_power) {
                        struct phy_device *phydev;
                        u32 advertising;

                        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

                        tp->link_config.phy_is_low_power = 1;

                        tp->link_config.orig_speed = phydev->speed;
                        tp->link_config.orig_duplex = phydev->duplex;
                        tp->link_config.orig_autoneg = phydev->autoneg;
                        tp->link_config.orig_advertising = phydev->advertising;

                        advertising = ADVERTISED_TP |
                                      ADVERTISED_Pause |
                                      ADVERTISED_Autoneg |
                                      ADVERTISED_10baseT_Half;

                        /* ASF/WOL needs a usable link while asleep;
                         * widen the advertisement accordingly. */
                        if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
                            (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
                                if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
                                        advertising |=
                                                ADVERTISED_100baseT_Half |
                                                ADVERTISED_100baseT_Full |
                                                ADVERTISED_10baseT_Full;
                                else
                                        advertising |= ADVERTISED_10baseT_Full;
                        }

                        phydev->advertising = advertising;

                        phy_start_aneg(phydev);
                }
        } else {
                if (tp->link_config.phy_is_low_power == 0) {
                        tp->link_config.phy_is_low_power = 1;
                        tp->link_config.orig_speed = tp->link_config.speed;
                        tp->link_config.orig_duplex = tp->link_config.duplex;
                        tp->link_config.orig_autoneg = tp->link_config.autoneg;
                }

                if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                        tp->link_config.speed = SPEED_10;
                        tp->link_config.duplex = DUPLEX_HALF;
                        tp->link_config.autoneg = AUTONEG_ENABLE;
                        tg3_setup_phy(tp, 0);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                /* 5906 WOL handling lives in the VCPU. */
                val = tr32(GRC_VCPU_EXT_CTRL);
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
        } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                int i;
                u32 val;

                /* Give firmware up to 200ms to post its ready magic;
                 * fall through regardless (best effort). */
                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        /* Tell firmware (via the WOL mailbox) that the driver is
         * shutting down with WOL armed. */
        if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
                tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                                     WOL_DRV_STATE_SHUTDOWN |
                                                     WOL_DRV_WOL |
                                                     WOL_SET_MAGIC_PKT);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                /* Build the MAC mode needed to receive wake packets. */
                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
                                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                                udelay(40);
                        }

                        if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                                mac_mode = MAC_MODE_PORT_MODE_GMII;
                        else
                                mac_mode = MAC_MODE_PORT_MODE_MII;

                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700) {
                                u32 speed = (tp->tg3_flags &
                                             TG3_FLAG_WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
                                        mac_mode |= MAC_MODE_LINK_POLARITY;
                                else
                                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        }
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                if (pci_pme_capable(tp->pdev, state) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
                        /* Keep received frames flowing to the management
                         * firmware (ASF/APE, e.g. DASH) during WOL. */
                        if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
                            !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
                            ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
                             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
                                mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
                }

                if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
                        mac_mode |= tp->mac_mode &
                                    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
                        if (mac_mode & MAC_MODE_APE_TX_EN)
                                mac_mode |= MAC_MODE_TDE_ENABLE;
                }

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        /* Gate as many clocks as this chip family permits. */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
                   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
                /* do nothing */
        } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                /* Two-step write; each settles for 40us. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        /* Keep the PHY up if WOL, ASF, or APE (DASH management)
         * still needs the link while suspended. */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                tg3_power_down_phy(tp);

        tg3_frob_aux_power(tp);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                        int err;

                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
                pci_enable_wake(tp->pdev, state, true);

        /* Finally, set the new power state. */
        pci_set_power_state(tp->pdev, state);

        return 0;
}
2272
2273 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2274 {
2275         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2276         case MII_TG3_AUX_STAT_10HALF:
2277                 *speed = SPEED_10;
2278                 *duplex = DUPLEX_HALF;
2279                 break;
2280
2281         case MII_TG3_AUX_STAT_10FULL:
2282                 *speed = SPEED_10;
2283                 *duplex = DUPLEX_FULL;
2284                 break;
2285
2286         case MII_TG3_AUX_STAT_100HALF:
2287                 *speed = SPEED_100;
2288                 *duplex = DUPLEX_HALF;
2289                 break;
2290
2291         case MII_TG3_AUX_STAT_100FULL:
2292                 *speed = SPEED_100;
2293                 *duplex = DUPLEX_FULL;
2294                 break;
2295
2296         case MII_TG3_AUX_STAT_1000HALF:
2297                 *speed = SPEED_1000;
2298                 *duplex = DUPLEX_HALF;
2299                 break;
2300
2301         case MII_TG3_AUX_STAT_1000FULL:
2302                 *speed = SPEED_1000;
2303                 *duplex = DUPLEX_FULL;
2304                 break;
2305
2306         default:
2307                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2308                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2309                                  SPEED_10;
2310                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2311                                   DUPLEX_HALF;
2312                         break;
2313                 }
2314                 *speed = SPEED_INVALID;
2315                 *duplex = DUPLEX_INVALID;
2316                 break;
2317         }
2318 }
2319
/* Program the copper PHY advertisement and control registers to begin
 * link establishment.  Three mutually exclusive cases:
 *
 *  1. phy_is_low_power: advertise only the low speeds needed for
 *     Wake-on-LAN, with gigabit disabled.
 *  2. speed == SPEED_INVALID: normal autonegotiation; advertise what
 *     link_config.advertising requests (minus gigabit on 10/100-only
 *     parts) plus the configured flow-control bits.
 *  3. Otherwise: a specific speed/duplex was forced; advertise only
 *     that mode.
 *
 * Finally either forces BMCR directly (autoneg disabled) or restarts
 * autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100baseT only if WOL requires the higher speed. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		/* Translate ethtool ADVERTISED_* bits into MII
		 * advertisement register bits.
		 */
		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 revisions are forced to negotiate
			 * as gigabit master (chip-rev workaround).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* Non-gigabit mode: clear the 1000T control
			 * register below.
			 */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback and wait
			 * (up to ~15ms) for link status to drop before
			 * programming the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* Read BMSR twice: the link bit is
				 * latched (802.3 Clause 22).
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: (re)start negotiation with the
		 * advertisements programmed above.
		 */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
2457
2458 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2459 {
2460         int err;
2461
2462         /* Turn off tap power management. */
2463         /* Set Extended packet length bit */
2464         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2465
2466         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2467         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2468
2469         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2470         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2471
2472         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2473         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2474
2475         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2476         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2477
2478         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2479         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2480
2481         udelay(40);
2482
2483         return err;
2484 }
2485
2486 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2487 {
2488         u32 adv_reg, all_mask = 0;
2489
2490         if (mask & ADVERTISED_10baseT_Half)
2491                 all_mask |= ADVERTISE_10HALF;
2492         if (mask & ADVERTISED_10baseT_Full)
2493                 all_mask |= ADVERTISE_10FULL;
2494         if (mask & ADVERTISED_100baseT_Half)
2495                 all_mask |= ADVERTISE_100HALF;
2496         if (mask & ADVERTISED_100baseT_Full)
2497                 all_mask |= ADVERTISE_100FULL;
2498
2499         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2500                 return 0;
2501
2502         if ((adv_reg & all_mask) != all_mask)
2503                 return 0;
2504         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2505                 u32 tg3_ctrl;
2506
2507                 all_mask = 0;
2508                 if (mask & ADVERTISED_1000baseT_Half)
2509                         all_mask |= ADVERTISE_1000HALF;
2510                 if (mask & ADVERTISED_1000baseT_Full)
2511                         all_mask |= ADVERTISE_1000FULL;
2512
2513                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2514                         return 0;
2515
2516                 if ((tg3_ctrl & all_mask) != all_mask)
2517                         return 0;
2518         }
2519         return 1;
2520 }
2521
2522 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2523 {
2524         u32 curadv, reqadv;
2525
2526         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2527                 return 1;
2528
2529         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2530         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2531
2532         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2533                 if (curadv != reqadv)
2534                         return 0;
2535
2536                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2537                         tg3_readphy(tp, MII_LPA, rmtadv);
2538         } else {
2539                 /* Reprogram the advertisement register, even if it
2540                  * does not affect the current link.  If the link
2541                  * gets renegotiated in the future, we can save an
2542                  * additional renegotiation cycle by advertising
2543                  * it correctly in the first place.
2544                  */
2545                 if (curadv != reqadv) {
2546                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2547                                      ADVERTISE_PAUSE_ASYM);
2548                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2549                 }
2550         }
2551
2552         return 1;
2553 }
2554
/* Bring up (or re-verify) the link on a copper PHY and program the
 * MAC to match.  Reads the negotiated speed/duplex from the PHY,
 * decides whether the link is usable, kicks off renegotiation via
 * tg3_phy_copper_begin() when it is not, then updates MAC_MODE,
 * flow control, interrupt masks, and the net-device carrier state.
 *
 * @force_reset: caller demands a PHY reset before link evaluation
 *               (also set internally for flaky third-party PHYs).
 * Returns 0, or a negative error from 5401 DSP re-initialization.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Quiesce MAC events and clear stale link-state status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Disable MI auto-polling while we do direct MDIO accesses. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched; read twice for the
		 * current value.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: redo the 5401 DSP init and
			 * poll (up to ~10ms) for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a full reset plus
			 * DSP re-init if link still did not come back.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* If bit 10 of the aux control register was not yet
		 * set, set it and go straight to renegotiation.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to ~4ms) for link up; BMSR is read twice because
	 * the link bit is latched.
	 */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait (up to 20ms) for a non-zero aux status, which
		 * encodes the negotiated speed/duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for BMCR to read back a sane value (not 0 and
		 * not all-ones-in-15-bits).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg link is good only if the PHY is
			 * advertising everything we want, including
			 * the right pause bits.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: link is good only if the PHY
			 * matches the requested configuration exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	/* Link not usable (or waking from low power): restart
	 * negotiation and re-check link status once.
	 */
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode/duplex to match the link. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: notify firmware via
	 * the mailbox after the link settles.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate link state to the net device and log changes. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2813
/* Software state for the fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine), patterned after the IEEE 802.3
 * Clause 37 1000BASE-X arbitration process.
 */
struct tg3_fiber_aneginfo {
	/* Current arbitration state, one of ANEG_STATE_*. */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* Control/status flag bits (MR_*), mirroring the 802.3 "MR"
	 * management variables; MR_LP_ADV_* record what the link
	 * partner advertised.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters, measured in state-machine invocations (the
	 * machine increments cur_time once per call), not jiffies.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times
	 * it has repeated (used to detect a stable ability match).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match detectors derived from the received config stream. */
	char ability_match, idle_match, ack_match;

	/* Raw transmitted/received config words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine. */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must persist before the machine advances. */
#define ANEG_STATE_SETTLE_TIME  10000
2877
/* Advance the software 1000BASE-X autonegotiation state machine by
 * one tick.  Samples the received config word from the MAC, updates
 * the ability/ack/idle match detectors in @ap, then dispatches on
 * ap->state.
 *
 * Returns ANEG_OK (keep going), ANEG_TIMER_ENAB (keep going, caller
 * should continue ticking on a timer), ANEG_DONE (negotiation
 * finished), or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: zero out all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word and update the match
	 * detectors: ability_match is set after the same non-idle
	 * word is seen more than once in a row.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word being received: idle stream. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Start transmitting an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Dwell here for the settle time before advancing. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Transmit our abilities: full duplex plus the pause
		 * bits derived from the configured flow control.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait until the partner's non-zero config word has
		 * been seen repeatedly.
		 */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Config word changed under us: restart. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's advertised abilities from the
		 * received config word into MR_LP_ADV_* flags.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange needed but
					 * unimplemented (see below).
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3131
3132 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3133 {
3134         int res = 0;
3135         struct tg3_fiber_aneginfo aninfo;
3136         int status = ANEG_FAILED;
3137         unsigned int tick;
3138         u32 tmp;
3139
3140         tw32_f(MAC_TX_AUTO_NEG, 0);
3141
3142         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3143         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3144         udelay(40);
3145
3146         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3147         udelay(40);
3148
3149         memset(&aninfo, 0, sizeof(aninfo));
3150         aninfo.flags |= MR_AN_ENABLE;
3151         aninfo.state = ANEG_STATE_UNKNOWN;
3152         aninfo.cur_time = 0;
3153         tick = 0;
3154         while (++tick < 195000) {
3155                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3156                 if (status == ANEG_DONE || status == ANEG_FAILED)
3157                         break;
3158
3159                 udelay(1);
3160         }
3161
3162         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3163         tw32_f(MAC_MODE, tp->mac_mode);
3164         udelay(40);
3165
3166         *txflags = aninfo.txconfig;
3167         *rxflags = aninfo.flags;
3168
3169         if (status == ANEG_DONE &&
3170             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3171                              MR_LP_ADV_FULL_DUPLEX)))
3172                 res = 1;
3173
3174         return res;
3175 }
3176
/* One-time / link-recovery init sequence for the BCM8002 fiber PHY.
 * The raw register numbers and values below are opaque vendor magic
 * for this PHY; keep the order and delays intact.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	/* 500 * 10us = 5ms busy wait. */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	/* 15000 * 10us = 150ms busy wait. */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3226
/* Fiber link setup using the hardware (SG_DIG) 1000BASE-X
 * autonegotiation engine.  Returns 1 when link is up, else 0.
 *
 * On chips other than 5704 A0/A1 a serdes workaround applies:
 * MAC_SERDES_CFG is rewritten with magic values (0xc010000 for
 * port A, 0x4010000 for port B) whenever hardware autoneg is torn
 * down or restarted, preserving the pre-emphasis and voltage
 * regulator bits read back from the register.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable HW autoneg if it was active,
		 * then report link up as soon as PCS has sync.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	/* Fold our pause advertisement into the expected control word. */
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* During the parallel-detect grace period keep the
		 * link up as long as PCS is synced and no config code
		 * words are being received.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset while programming the wanted mode. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: derive pause settings from
			 * what we advertised and what the partner sent.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: disable HW autoneg
				 * and fall back to parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: restart the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
3368
/* Fiber link setup using the software autoneg state machine, or a
 * forced 1000FD link when autoneg is disabled.  Returns 1 when link
 * is up, else 0.  Does nothing unless PCS already has sync.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Map the negotiated pause bits into MII-style
			 * advertisement words for flow control setup.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack the SYNC/CFG changed latches until they stay
		 * clear (bounded), before trusting MAC_STATUS below.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but sync with no incoming config
		 * code words still counts as link (parallel detect).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly send config code words, then stop. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
3430
/* Main link setup entry for SERDES (fiber, TBI) devices.  Dispatches
 * to hardware or software autoneg, then updates LEDs, carrier state
 * and link_config, reporting any link change.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember pre-setup link parameters so a change can be
	 * reported even when the carrier state itself did not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, init done,
	 * and the MAC still shows a clean synced link -- just ack the
	 * change latches and keep the current configuration.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Fiber always runs the MAC in TBI mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the pending link-change bit in the status block so
	 * the interrupt path does not re-run link setup needlessly.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the change latches until they stay clear (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Autoneg timer expired with no sync: pulse
			 * config code words to prod the link partner.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report carrier flips, or parameter changes on a stable link. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3538
/* Link setup for SERDES devices driven through an MII-style PHY
 * interface (the 5714 path checks MAC_TX_STATUS instead of trusting
 * BMSR link status).  Handles autoneg, forced mode and the
 * parallel-detect grace period.  Returns the OR-accumulated error
 * status of the tg3_readphy calls made along the way.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status change latches. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice: link status is latched-low, so the second
	 * read reflects the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: take link state from the MAC, not the PHY. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000BASE-X advertisement word from our
		 * configured duplex and flow-control settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Advertisement changed (or autoneg was off): restart
		 * autoneg and return early; link is checked next poll.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced mode: program duplex directly. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Drop the advertisement and restart
				 * autoneg so the partner sees the link
				 * go down before we force the mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Double read again for latched link status. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of our
			 * and the partner's advertisements; if nothing
			 * common, the link is not usable.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	/* NOTE(review): this tests the previous active_duplex (it is
	 * only updated from current_duplex further down) -- confirm
	 * the ordering is intentional before changing it.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate link state to the net device and report changes. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3708
/* Poll-timer helper for MII-serdes parts: once the autoneg grace
 * counter expires, force the link up via parallel detection when the
 * PHY shows signal but no config code words; conversely, when a
 * parallel-detected link starts receiving config code words, hand
 * control back to autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice -- presumably the first read
			 * returns a latched value; confirm against
			 * the PHY datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3766
3767 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3768 {
3769         int err;
3770
3771         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3772                 err = tg3_setup_fiber_phy(tp, force_reset);
3773         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3774                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3775         } else {
3776                 err = tg3_setup_copper_phy(tp, force_reset);
3777         }
3778
3779         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3780             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3781                 u32 val, scale;
3782
3783                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3784                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3785                         scale = 65;
3786                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3787                         scale = 6;
3788                 else
3789                         scale = 12;
3790
3791                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3792                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3793                 tw32(GRC_MISC_CFG, val);
3794         }
3795
3796         if (tp->link_config.active_speed == SPEED_1000 &&
3797             tp->link_config.active_duplex == DUPLEX_HALF)
3798                 tw32(MAC_TX_LENGTHS,
3799                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3800                       (6 << TX_LENGTHS_IPG_SHIFT) |
3801                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3802         else
3803                 tw32(MAC_TX_LENGTHS,
3804                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3805                       (6 << TX_LENGTHS_IPG_SHIFT) |
3806                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3807
3808         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3809                 if (netif_carrier_ok(tp->dev)) {
3810                         tw32(HOSTCC_STAT_COAL_TICKS,
3811                              tp->coal.stats_block_coalesce_usecs);
3812                 } else {
3813                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3814                 }
3815         }
3816
3817         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3818                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3819                 if (!netif_carrier_ok(tp->dev))
3820                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3821                               tp->pwrmgmt_thresh;
3822                 else
3823                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3824                 tw32(PCIE_PWR_MGMT_THRESH, val);
3825         }
3826
3827         return err;
3828 }
3829
3830 /* This is called whenever we suspect that the system chipset is re-
3831  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3832  * is bogus tx completions. We try to recover by setting the
3833  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3834  * in the workqueue.
3835  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (flag set, or
	 * indirect mailbox writes in use) this path should be
	 * unreachable.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the recovery; the actual chip reset happens later in
	 * the workqueue (see comment above this function).
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3850
3851 static inline u32 tg3_tx_avail(struct tg3 *tp)
3852 {
3853         smp_mb();
3854         return (tp->tx_pending -
3855                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3856 }
3857
3858 /* Tigon3 never reports partial packet sends.  So we do not
3859  * need special logic to handle SKBs that have not had all
3860  * of their frags sent yet, like SunGEM does.
3861  */
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaim completed TX descriptors between the driver's consumer
 * index (tp->tx_cons) and the hardware's consumer index from the
 * status block, unmapping and freeing each skb.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means bogus completions,
		 * likely from MMIO write re-ordering -- trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Walk the fragment slots; only the head slot should have
		 * carried the skb pointer, and we must not walk past the
		 * hardware index.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with the xmit
	 * path stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3916
3917 /* Returns size of skb allocated or < 0 on error.
3918  *
3919  * We only need to fill in the address because the other members
3920  * of the RX descriptor are invariant, see tg3_init_rings.
3921  *
3922  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3923  * posting buffers we only dirty the first cache line of the RX
3924  * descriptor (containing the address).  Whereas for the RX status
3925  * buffers the cpu only reads the last cacheline of the RX descriptor
3926  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3927  */
/* Returns size of skb allocated or < 0 on error.
 *
 * Allocate and DMA-map a fresh receive buffer and install it into the
 * descriptor at dest_idx_unmasked (masked to the ring size) of the ring
 * selected by opaque_key.  If src_idx >= 0, the source slot's skb
 * pointer is cleared after the new buffer is committed.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	/* Align payload; rx_offset is 2 except on 5701/PCI-X (see tg3_rx). */
	skb_reserve(skb, tp->rx_offset);

	/* NOTE(review): the mapping result is not checked with
	 * pci_dma_mapping_error() -- on platforms where mapping can
	 * fail this could post a bad address to the chip; confirm
	 * against the DMA API requirements for this kernel version.
	 */
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the address words of the descriptor are written; the
	 * chip reads them to find the new buffer.
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3988
3989 /* We only need to move over in the address because the other
3990  * members of the RX descriptor are invariant.  See notes above
3991  * tg3_alloc_rx_skb for full details.
3992  */
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 *
 * Re-post the already-mapped buffer at src_idx into the slot at
 * dest_idx_unmasked (masked to the ring size), transferring the skb
 * pointer, the DMA unmap cookie, and the descriptor address words.
 */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		/* Unknown ring cookie: nothing we can safely recycle. */
		return;
	}

	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Source slot no longer owns the buffer. */
	src_map->skb = NULL;
}
4029
#if TG3_VLAN_TAG_USED
/* Hand a received skb with a hardware-extracted VLAN tag to the
 * VLAN acceleration receive path.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
4036
4037 /* The RX ring scheme is composed of multiple rings which post fresh
4038  * buffers to the chip, and one special ring the chip uses to report
4039  * status back to the host.
4040  *
4041  * The special ring reports the status of received packets to the
4042  * host.  The chip does not write into the original descriptor the
4043  * RX buffer was obtained from.  The chip simply takes the original
4044  * descriptor as provided by the host, updates the status and length
4045  * field, then writes this into the next status ring entry.
4046  *
4047  * Each ring the host uses to post buffers to the chip is described
4048  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4049  * it is first placed into the on-chip ram.  When the packet's length
4050  * is known, it walks down the TG3_BDINFO entries to select the ring.
4051  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4052  * which is within the range of the new packet's length is chosen.
4053  *
4054  * The "separate ring for rx status" scheme may sound queer, but it makes
4055  * sense from a cache coherency perspective.  If only the host writes
4056  * to the buffer post rings, and only the chip writes to the rx status
4057  * rings, then cache lines never move beyond shared-modified state.
4058  * If both the host and chip were to write into the same ring, cache line
4059  * eviction could occur since both entities want it in an exclusive state.
4060  */
/* Process up to 'budget' packets from the RX return (status) ring.
 * Returns the number of packets delivered to the stack.  Fresh
 * buffers are posted back to the std/jumbo rings and producer
 * mailboxes are updated at the end (or mid-loop for the std ring
 * when rx_std_max_post is reached).
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which posting ring and
		 * which slot this completion refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unrecognized cookie: skip without posting. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: hand the mapped buffer up as-is
			 * and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy it into a new skb and recycle
			 * the original DMA buffer back onto the ring.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip flags it
		 * valid and the computed value is the expected 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Don't let the std ring producer get too far ahead of the
		 * chip's view; flush the mailbox mid-loop if needed.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Ensure mailbox writes are visible before the IRQ is re-enabled. */
	mmiowb();

	return received;
}
4216
/* One pass of NAPI poll work: handle link-change events, reap TX
 * completions, then run RX within the remaining budget.  Returns the
 * updated work_done count; returns early if TX recovery is pending.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while preserving the
			 * other status bits and re-asserting UPDATED.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				/* phylib owns the link state; just ack the
				 * MAC status bits.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
4258
/* NAPI poll entry point.  Loops over tg3_poll_work() until either the
 * budget is exhausted, no work remains (then completes NAPI and
 * re-enables interrupts), or TX recovery is required (then schedules
 * the reset task).
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
4299
/* Quiesce the interrupt handler: set irq_sync so the ISRs bail out,
 * then wait for any in-flight handler to finish.  Must not be called
 * when irq_sync is already set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting on the handler. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
4309
/* Nonzero while the IRQ handler is quiesced (see tg3_irq_quiesce). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4314
4315 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4316  * If irq_sync is non-zero, then the IRQ handler must be synchronized
4317  * with as well.  Most of the time, this is not necessary except when
4318  * shutting down the device.
4319  */
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
4326
/* Release the lock taken by tg3_full_lock().  Note: does not undo
 * irq_sync; callers that quiesced the IRQ clear tp->irq_sync
 * themselves when re-enabling.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
4331
4332 /* One-shot MSI handler - Chip automatically disables interrupt
4333  * after sending MSI so driver doesn't have to do it.
4334  */
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll handler will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling NAPI while the handler is quiesced. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
4348
4349 /* MSI ISR - No need to check for interrupt sharing and no need to
4350  * flush status block and interrupt mailbox. PCI ordering rules
4351  * guarantee that MSI will arrive after the status block.
4352  */
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll handler will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
4373
/* Legacy (non-tagged) INTx interrupt handler.  Determines whether the
 * interrupt is ours, masks further chip interrupts via the mailbox,
 * and schedules NAPI if there is work.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (shared line) or chip is in
			 * reset -- report unhandled.
			 */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
4422
/* INTx interrupt handler for chips using tagged status blocks.  A new
 * status tag (different from tp->last_tag) indicates new work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt or chip is resetting. */
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
4470
4471 /* ISR for interrupt test */
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Handled if the status block updated or the chip asserted INTx;
	 * disable interrupts so the test fires exactly once.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4485
4486 static int tg3_init_hw(struct tg3 *, int);
4487 static int tg3_halt(struct tg3 *, int, int);
4488
4489 /* Restart hardware after configuration changes, self-test, etc.
4490  * Invoked with tp->lock held.
4491  */
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On init failure the device is halted and closed; note that the lock
 * is dropped across dev_close() and re-taken before returning, as the
 * sparse annotations indicate.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() can sleep; must not hold tp->lock across it. */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4512
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the interrupt handler directly so the device
 * can be serviced with normal interrupts disabled (netconsole etc.).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4521
/* Workqueue task that fully resets and re-initializes the chip,
 * scheduled from tg3_tx_timeout() or the TX-recovery path.  If TX
 * recovery is pending, switches to flushed (ordered) mailbox writes
 * before the reset.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Device may have been closed since the task was scheduled. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	/* Drop the lock: tg3_phy_stop()/tg3_netif_stop() may sleep. */
	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	/* Re-take the lock and quiesce the IRQ handler for the reset. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* Engage the MMIO write-reordering workaround: use
		 * flushed mailbox writes from now on.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
4569
/* Dump a minimal set of MAC/DMA status registers to the log for
 * TX-timeout diagnostics.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4577
/* Net-device TX watchdog callback: log diagnostic state (if enabled)
 * and schedule a full chip reset via the workqueue.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	/* Reset must run in process context; defer to tg3_reset_task. */
	schedule_work(&tp->reset_task);
}
4590
4591 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4592 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4593 {
4594         u32 base = (u32) mapping & 0xffffffff;
4595
4596         return ((base > 0xffffdcc0) &&
4597                 (base + len + 8 < base));
4598 }
4599
4600 /* Test for DMA addresses > 40-bit */
4601 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4602                                           int len)
4603 {
4604 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4605         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4606                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4607         return 0;
4608 #else
4609         return 0;
4610 #endif
4611 }
4612
4613 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4614
4615 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearizes the skb into a freshly mapped copy that does not trip
 * the DMA errata, installs it at *start, and releases the original
 * skb and its ring slots up to last_plus_one.  Returns 0 on success,
 * -1 if the copy could not be allocated or still crosses a 4GB
 * boundary (in which case the packet is dropped).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 needs 4-byte aligned start-of-packet. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
		new_addr = skb_shinfo(new_skb)->dma_maps[0];

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
			if (!ret)
				skb_dma_unmap(&tp->pdev->dev, new_skb,
					      DMA_TO_DEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* Single descriptor for the linear copy. */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries.  The first slot takes
	 * ownership of new_skb (NULL if we are dropping); the rest of
	 * the original packet's slots are cleared.
	 */
	i = 0;
	while (entry != last_plus_one) {
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	/* Original skb is always unmapped and freed, success or not. */
	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	return ret;
}
4677
4678 static void tg3_set_txd(struct tg3 *tp, int entry,
4679                         dma_addr_t mapping, int len, u32 flags,
4680                         u32 mss_and_is_end)
4681 {
4682         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4683         int is_end = (mss_and_is_end & 0x1);
4684         u32 mss = (mss_and_is_end >> 1);
4685         u32 vlan_tag = 0;
4686
4687         if (is_end)
4688                 flags |= TXD_FLAG_END;
4689         if (flags & TXD_FLAG_VLAN) {
4690                 vlan_tag = flags >> 16;
4691                 flags &= 0xffff;
4692         }
4693         vlan_tag |= (mss << TXD_MSS_SHIFT);
4694
4695         txd->addr_hi = ((u64) mapping >> 32);
4696         txd->addr_lo = ((u64) mapping & 0xffffffff);
4697         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4698         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4699 }
4700
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	struct skb_shared_info *sp;
	dma_addr_t mapping;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path: the headers are modified below, so a
		 * cloned header must be uncloned first.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Checksum fields are zeroed for the offload
			 * engine to fill in; tot_len is seeded with the
			 * per-segment length.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			/* Header length is encoded in the upper mss bits. */
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Map the linear part and all fragments for DMA. */
	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	sp = skb_shinfo(skb);

	mapping = sp->dma_maps[0];

	tp->tx_buffers[entry].skb = skb;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = sp->dma_maps[i + 1];
			tp->tx_buffers[entry].skb = NULL;

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race against
		 * the reclaim path freeing space concurrently.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4820
4821 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4822
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		/* Re-check after stopping; if space opened up in the
		 * meantime, wake the queue and proceed.
		 */
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Software-segment the skb with TSO masked off, then transmit
	 * each resulting packet individually.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	/* The original skb is consumed in every case. */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4855
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	struct skb_shared_info *sp;
	int would_hit_hwbug;
	dma_addr_t mapping;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* Headers are modified below; unclone first. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* TSO headers longer than 80 bytes trip an erratum on
		 * TSO_BUG chips; fall back to software GSO.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO is seeded with the TCP
			 * pseudo-header checksum.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				/* Extra IP/TCP option words are encoded
				 * in the upper mss bits.
				 */
				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				/* Other chips take the option words in
				 * the descriptor flags instead.
				 */
				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Map the linear part and all fragments for DMA. */
	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	sp = skb_shinfo(skb);

	mapping = sp->dma_maps[0];

	tp->tx_buffers[entry].skb = skb;

	would_hit_hwbug = 0;

	/* Check every mapping against the chip's DMA errata; if any
	 * piece is unsafe, the whole packet is re-posted below via the
	 * linear-copy workaround.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
		would_hit_hwbug = 1;
	else if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = sp->dma_maps[i + 1];

			tp->tx_buffers[entry].skb = NULL;

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet; the
		 * u32 wraparound is corrected by the ring-size mask.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race against
		 * the reclaim path freeing space concurrently.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
5033
5034 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5035                                int new_mtu)
5036 {
5037         dev->mtu = new_mtu;
5038
5039         if (new_mtu > ETH_DATA_LEN) {
5040                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5041                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5042                         ethtool_op_set_tso(dev, 0);
5043                 }
5044                 else
5045                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5046         } else {
5047                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5048                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5049                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5050         }
5051 }
5052
/* net_device MTU-change handler.  When the interface is running, the
 * chip is halted and restarted with the new buffer geometry.
 * Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Quiesce the PHY and the data path before reconfiguring. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
5091
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard receive ring: unmap and free every posted buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo receive ring: same, with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Transmit ring: an skb occupies one slot for the linear part
	 * plus one per fragment, so advance i by that count.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

		txp->skb = NULL;

		i += skb_shinfo(skb)->nr_frags + 1;

		dev_kfree_skb_any(skb);
	}
}
5151
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if no rx buffers could be allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use jumbo-sized buffers on the standard
	 * ring when the MTU exceeds the standard frame size (they do
	 * not enable the separate jumbo ring, see tg3_set_mtu).
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			/* A partial ring is usable; shrink rx_pending
			 * to the number of buffers actually allocated.
			 */
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
5241
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases every DMA-coherent ring/status/stats area and the software
 * ring-state arrays; safe to call on a partially-allocated state.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_jumbo_buffers and tx_buffers live inside this single
	 * allocation (see tg3_alloc_consistent), so one kfree releases
	 * all three arrays.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
5281
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the software ring-state arrays and all DMA-coherent
 * descriptor/status/stats areas.  Returns 0 on success or -ENOMEM
 * (any partial allocations are released before returning).
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One zeroed allocation backs the std-rx, jumbo-rx and tx
	 * software state arrays; tg3_free_consistent() frees it via
	 * rx_std_buffers alone.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	/* Carve the jumbo and tx arrays out of the same block. */
	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	/* Partial failure: release whatever was allocated. */
	tg3_free_consistent(tp);
	return -ENOMEM;
}
5343
5344 #define MAX_WAIT_CNT 1000
5345
5346 /* To stop a block, clear the enable bit and poll till it
5347  * clears.  tp->lock is held.
5348  */
5349 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5350 {
5351         unsigned int i;
5352         u32 val;
5353
5354         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5355                 switch (ofs) {
5356                 case RCVLSC_MODE:
5357                 case DMAC_MODE:
5358                 case MBFREE_MODE:
5359                 case BUFMGR_MODE:
5360                 case MEMARB_MODE:
5361                         /* We can't enable/disable these bits of the
5362                          * 5705/5750, just say success.
5363                          */
5364                         return 0;
5365
5366                 default:
5367                         break;
5368                 }
5369         }
5370
5371         val = tr32(ofs);
5372         val &= ~enable_bit;
5373         tw32_f(ofs, val);
5374
5375         for (i = 0; i < MAX_WAIT_CNT; i++) {
5376                 udelay(100);
5377                 val = tr32(ofs);
5378                 if ((val & enable_bit) == 0)
5379                         break;
5380         }
5381
5382         if (i == MAX_WAIT_CNT && !silent) {
5383                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5384                        "ofs=%lx enable_bit=%x\n",
5385                        ofs, enable_bit);
5386                 return -ENODEV;
5387         }
5388
5389         return 0;
5390 }
5391
/* Stop all chip engines: receive path first, then the send/DMA
 * blocks, then host coalescing and memory blocks.  Per-block failures
 * are OR-ed into the return value, so only the fact of failure
 * survives.  tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Shut off the MAC receiver first so no new frames arrive. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Send-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and wait for it to stop. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the status and statistics blocks so stale data is not
	 * seen after a restart.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
5454
5455 /* tp->lock is held. */
5456 static int tg3_nvram_lock(struct tg3 *tp)
5457 {
5458         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5459                 int i;
5460
5461                 if (tp->nvram_lock_cnt == 0) {
5462                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5463                         for (i = 0; i < 8000; i++) {
5464                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5465                                         break;
5466                                 udelay(20);
5467                         }
5468                         if (i == 8000) {
5469                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5470                                 return -ENODEV;
5471                         }
5472                 }
5473                 tp->nvram_lock_cnt++;
5474         }
5475         return 0;
5476 }
5477
5478 /* tp->lock is held. */
5479 static void tg3_nvram_unlock(struct tg3 *tp)
5480 {
5481         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5482                 if (tp->nvram_lock_cnt > 0)
5483                         tp->nvram_lock_cnt--;
5484                 if (tp->nvram_lock_cnt == 0)
5485                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5486         }
5487 }
5488
5489 /* tp->lock is held. */
5490 static void tg3_enable_nvram_access(struct tg3 *tp)
5491 {
5492         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5493             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5494                 u32 nvaccess = tr32(NVRAM_ACCESS);
5495
5496                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5497         }
5498 }
5499
5500 /* tp->lock is held. */
5501 static void tg3_disable_nvram_access(struct tg3 *tp)
5502 {
5503         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5504             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5505                 u32 nvaccess = tr32(NVRAM_ACCESS);
5506
5507                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5508         }
5509 }
5510
/* Post an event to the APE firmware and ring its doorbell.
 *
 * Silently returns when the APE segment signature or firmware-ready
 * status shows no usable APE firmware, or when the APE memory lock
 * cannot be taken.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                /* Previous event consumed -- post the new one while we
                 * still hold the memory lock.
                 */
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        /* Ring the doorbell only if the event was actually posted above. */
        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5546
5547 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5548 {
5549         u32 event;
5550         u32 apedata;
5551
5552         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5553                 return;
5554
5555         switch (kind) {
5556                 case RESET_KIND_INIT:
5557                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5558                                         APE_HOST_SEG_SIG_MAGIC);
5559                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5560                                         APE_HOST_SEG_LEN_MAGIC);
5561                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5562                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5563                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5564                                         APE_HOST_DRIVER_ID_MAGIC);
5565                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5566                                         APE_HOST_BEHAV_NO_PHYLOCK);
5567
5568                         event = APE_EVENT_STATUS_STATE_START;
5569                         break;
5570                 case RESET_KIND_SHUTDOWN:
5571                         /* With the interface we are currently using,
5572                          * APE does not track driver state.  Wiping
5573                          * out the HOST SEGMENT SIGNATURE forces
5574                          * the APE to assume OS absent status.
5575                          */
5576                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5577
5578                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5579                         break;
5580                 case RESET_KIND_SUSPEND:
5581                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5582                         break;
5583                 default:
5584                         return;
5585         }
5586
5587         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5588
5589         tg3_ape_send_event(tp, event);
5590 }
5591
5592 /* tp->lock is held. */
5593 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5594 {
5595         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5596                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5597
5598         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5599                 switch (kind) {
5600                 case RESET_KIND_INIT:
5601                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5602                                       DRV_STATE_START);
5603                         break;
5604
5605                 case RESET_KIND_SHUTDOWN:
5606                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5607                                       DRV_STATE_UNLOAD);
5608                         break;
5609
5610                 case RESET_KIND_SUSPEND:
5611                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5612                                       DRV_STATE_SUSPEND);
5613                         break;
5614
5615                 default:
5616                         break;
5617                 }
5618         }
5619
5620         if (kind == RESET_KIND_INIT ||
5621             kind == RESET_KIND_SUSPEND)
5622                 tg3_ape_driver_state_change(tp, kind);
5623 }
5624
5625 /* tp->lock is held. */
5626 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5627 {
5628         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5629                 switch (kind) {
5630                 case RESET_KIND_INIT:
5631                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5632                                       DRV_STATE_START_DONE);
5633                         break;
5634
5635                 case RESET_KIND_SHUTDOWN:
5636                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5637                                       DRV_STATE_UNLOAD_DONE);
5638                         break;
5639
5640                 default:
5641                         break;
5642                 }
5643         }
5644
5645         if (kind == RESET_KIND_SHUTDOWN)
5646                 tg3_ape_driver_state_change(tp, kind);
5647 }
5648
5649 /* tp->lock is held. */
5650 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5651 {
5652         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5653                 switch (kind) {
5654                 case RESET_KIND_INIT:
5655                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5656                                       DRV_STATE_START);
5657                         break;
5658
5659                 case RESET_KIND_SHUTDOWN:
5660                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5661                                       DRV_STATE_UNLOAD);
5662                         break;
5663
5664                 case RESET_KIND_SUSPEND:
5665                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5666                                       DRV_STATE_SUSPEND);
5667                         break;
5668
5669                 default:
5670                         break;
5671                 }
5672         }
5673 }
5674
5675 static int tg3_poll_fw(struct tg3 *tp)
5676 {
5677         int i;
5678         u32 val;
5679
5680         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5681                 /* Wait up to 20ms for init done. */
5682                 for (i = 0; i < 200; i++) {
5683                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5684                                 return 0;
5685                         udelay(100);
5686                 }
5687                 return -ENODEV;
5688         }
5689
5690         /* Wait for firmware initialization to complete. */
5691         for (i = 0; i < 100000; i++) {
5692                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5693                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5694                         break;
5695                 udelay(10);
5696         }
5697
5698         /* Chip might not be fitted with firmware.  Some Sun onboard
5699          * parts are configured like that.  So don't signal the timeout
5700          * of the above loop as an error, but do report the lack of
5701          * running firmware once.
5702          */
5703         if (i >= 100000 &&
5704             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5705                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5706
5707                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5708                        tp->dev->name);
5709         }
5710
5711         return 0;
5712 }
5713
/* Save the PCI command register before chip reset.  The GRC core
 * clock reset clears the memory enable bit in this register, so
 * tg3_restore_pci_state() writes the saved value back afterwards.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5719
/* Restore PCI state after chip reset.
 *
 * Re-enables indirect register access, rewrites the PCI command word
 * saved by tg3_save_pci_state(), and restores PCI-X / PCIe / MSI
 * settings that the core clock reset may have clobbered.  The write
 * ordering below is deliberate.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Put back the command word saved before the reset. */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        /* PCIe parts take a read-request size; legacy PCI parts need
         * their cache line size and latency timer restored instead.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
                pcie_set_readrq(tp->pdev, 4096);
        else {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tp->pcix_cap) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
5781
5782 static void tg3_stop_fw(struct tg3 *);
5783
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip and bring it back
 * to a usable baseline: save/restore PCI config state, reprogram
 * MAC_MODE for the attached PHY type, wait for firmware boot, and
 * reprobe the ASF enable state from NVRAM shadow memory.
 *
 * Returns 0 on success or the negative errno from tg3_poll_fw().
 * The statement ordering below is load-bearing; do not reorder.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int err;

        tg3_nvram_lock(tp);

        tg3_mdio_stop(tp);

        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
        if (tp->hw_status) {
                tp->hw_status->status = 0;
                tp->hw_status->status_tag = 0;
        }
        tp->last_tag = 0;
        smp_mb();
        synchronize_irq(tp->pdev->irq);

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* NOTE(review): 0x7e2c and bit 29 below are undocumented
                 * PCIe workaround registers/bits; meanings not derivable
                 * from this file.
                 */
                if (tr32(0x7e2c) == 0x60) {
                        tw32(0x7e2c, 0x20);
                }
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        /* 5906: flag a driver-initiated reset and un-halt the VCPU. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        /* NOTE(review): config offset 0xc4 / bit 15 is an
                         * undocumented 5750_A0 workaround -- confirm against
                         * Broadcom errata before touching.
                         */
                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }
                /* Set PCIE max payload size and clear error status.  */
                pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
        }

        tg3_restore_pci_state(tp);

        /* PCI config space is usable again; let the irq handler back in. */
        tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

        val = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Reprogram the MAC port mode for the attached PHY; with an APE
         * present, preserve only the APE TX/RX enables.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
                tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
                if (tp->mac_mode & MAC_MODE_APE_TX_EN)
                        tp->mac_mode |= MAC_MODE_TDE_ENABLE;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else
                tw32_f(MAC_MODE, 0);
        udelay(40);

        tg3_mdio_start(tp);

        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                /* NOTE(review): 0x7c00 / bit 25 is another undocumented
                 * PCIe register poke -- confirm before changing.
                 */
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        /* Reprobe ASF enable state.  */
        tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
        tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        tp->last_event_jiffies = jiffies;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
        }

        return 0;
}
5987
/* tp->lock is held.
 *
 * Ask the ASF firmware to pause via the driver<->firmware command
 * mailbox.  Skipped entirely when the APE manages the firmware
 * interface instead of the legacy ASF path.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
        if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
           !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
                /* Wait for RX cpu to ACK the previous event. */
                tg3_wait_for_event_ack(tp);

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

                /* Raise the event interrupt to the firmware... */
                tg3_generate_fw_event(tp);

                /* Wait for RX cpu to ACK this event. */
                tg3_wait_for_event_ack(tp);
        }
}
6004
/* tp->lock is held.
 *
 * Stop firmware, signal the reset of the given kind, abort pending
 * hardware activity, and reset the chip.  The post-reset signatures
 * are written even when the reset itself failed; the chip-reset
 * error (if any) is returned.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int err;

        tg3_stop_fw(tp);

        tg3_write_sig_pre_reset(tp, kind);

        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);

        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        return err;
}
6025
6026 #define TG3_FW_RELEASE_MAJOR    0x0
6027 #define TG3_FW_RELASE_MINOR     0x0
6028 #define TG3_FW_RELEASE_FIX      0x0
6029 #define TG3_FW_START_ADDR       0x08000000
6030 #define TG3_FW_TEXT_ADDR        0x08000000
6031 #define TG3_FW_TEXT_LEN         0x9c0
6032 #define TG3_FW_RODATA_ADDR      0x080009c0
6033 #define TG3_FW_RODATA_LEN       0x60
6034 #define TG3_FW_DATA_ADDR        0x08000a40
6035 #define TG3_FW_DATA_LEN         0x20
6036 #define TG3_FW_SBSS_ADDR        0x08000a60
6037 #define TG3_FW_SBSS_LEN         0xc
6038 #define TG3_FW_BSS_ADDR         0x08000a70
6039 #define TG3_FW_BSS_LEN          0x10
6040
/* Opaque firmware text-segment image, distributed under the notice at
 * the top of this file.  Loaded verbatim at TG3_FW_TEXT_ADDR (see the
 * TG3_FW_* layout defines above) -- do not hand-edit these words.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
        0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
        0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
        0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
        0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
        0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
        0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
        0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
        0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
        0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
        0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
        0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
        0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
        0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
        0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
        0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
        0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
        0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
        0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
        0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
        0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
        0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
        0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
        0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
        0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
        0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
        0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
        0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
        0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
        0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
        0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
        0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
        0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
        0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
        0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
        0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
        0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
        0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
        0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
        0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
        0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
        0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
        0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
        0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
        0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
        0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
        0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
        0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
        0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
        0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
        0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
        0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
        0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
        0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
        0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
        0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
        0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
        0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
        0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
        0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
        0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
        0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
        0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
        0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
6134
/* Opaque firmware read-only-data segment image (string constants),
 * loaded at TG3_FW_RODATA_ADDR -- do not hand-edit.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
        0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
        0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
        0x00000000
};
6142
#if 0 /* All zeros, don't eat up space with it. */
/* The firmware .data section is entirely zero, so it is compiled out;
 * tg3_load_5701_a0_firmware_fix() passes a NULL data pointer and the
 * loader zero-fills TG3_FW_DATA_LEN bytes instead.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
6149
/* On-chip scratch memory windows (16 KB each) into which the firmware
 * images for the RX and TX cpus are downloaded.
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
6154
6155 /* tp->lock is held. */
6156 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6157 {
6158         int i;
6159
6160         BUG_ON(offset == TX_CPU_BASE &&
6161             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6162
6163         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6164                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6165
6166                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6167                 return 0;
6168         }
6169         if (offset == RX_CPU_BASE) {
6170                 for (i = 0; i < 10000; i++) {
6171                         tw32(offset + CPU_STATE, 0xffffffff);
6172                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6173                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6174                                 break;
6175                 }
6176
6177                 tw32(offset + CPU_STATE, 0xffffffff);
6178                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6179                 udelay(10);
6180         } else {
6181                 for (i = 0; i < 10000; i++) {
6182                         tw32(offset + CPU_STATE, 0xffffffff);
6183                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6184                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6185                                 break;
6186                 }
6187         }
6188
6189         if (i >= 10000) {
6190                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6191                        "and %s CPU\n",
6192                        tp->dev->name,
6193                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6194                 return -ENODEV;
6195         }
6196
6197         /* Clear firmware's nvram arbitration. */
6198         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6199                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6200         return 0;
6201 }
6202
/* Describes one firmware image to download into an on-chip CPU: the
 * text, rodata and data sections, each with its link-time base address,
 * length in bytes, and a pointer to the section contents as 32-bit
 * words.  A NULL contents pointer means the section is zero-filled by
 * the loader (tg3_load_firmware_cpu).
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* length of .text in bytes */
	const u32 *text_data;		/* .text contents */
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* length of .rodata in bytes */
	const u32 *rodata_data;		/* .rodata contents */
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* length of .data in bytes */
	const u32 *data_data;		/* .data contents, NULL = all zeros */
};
6214
6215 /* tp->lock is held. */
6216 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6217                                  int cpu_scratch_size, struct fw_info *info)
6218 {
6219         int err, lock_err, i;
6220         void (*write_op)(struct tg3 *, u32, u32);
6221
6222         if (cpu_base == TX_CPU_BASE &&
6223             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6224                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6225                        "TX cpu firmware on %s which is 5705.\n",
6226                        tp->dev->name);
6227                 return -EINVAL;
6228         }
6229
6230         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6231                 write_op = tg3_write_mem;
6232         else
6233                 write_op = tg3_write_indirect_reg32;
6234
6235         /* It is possible that bootcode is still loading at this point.
6236          * Get the nvram lock first before halting the cpu.
6237          */
6238         lock_err = tg3_nvram_lock(tp);
6239         err = tg3_halt_cpu(tp, cpu_base);
6240         if (!lock_err)
6241                 tg3_nvram_unlock(tp);
6242         if (err)
6243                 goto out;
6244
6245         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6246                 write_op(tp, cpu_scratch_base + i, 0);
6247         tw32(cpu_base + CPU_STATE, 0xffffffff);
6248         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6249         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6250                 write_op(tp, (cpu_scratch_base +
6251                               (info->text_base & 0xffff) +
6252                               (i * sizeof(u32))),
6253                          (info->text_data ?
6254                           info->text_data[i] : 0));
6255         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6256                 write_op(tp, (cpu_scratch_base +
6257                               (info->rodata_base & 0xffff) +
6258                               (i * sizeof(u32))),
6259                          (info->rodata_data ?
6260                           info->rodata_data[i] : 0));
6261         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6262                 write_op(tp, (cpu_scratch_base +
6263                               (info->data_base & 0xffff) +
6264                               (i * sizeof(u32))),
6265                          (info->data_data ?
6266                           info->data_data[i] : 0));
6267
6268         err = 0;
6269
6270 out:
6271         return err;
6272 }
6273
6274 /* tp->lock is held. */
6275 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6276 {
6277         struct fw_info info;
6278         int err, i;
6279
6280         info.text_base = TG3_FW_TEXT_ADDR;
6281         info.text_len = TG3_FW_TEXT_LEN;
6282         info.text_data = &tg3FwText[0];
6283         info.rodata_base = TG3_FW_RODATA_ADDR;
6284         info.rodata_len = TG3_FW_RODATA_LEN;
6285         info.rodata_data = &tg3FwRodata[0];
6286         info.data_base = TG3_FW_DATA_ADDR;
6287         info.data_len = TG3_FW_DATA_LEN;
6288         info.data_data = NULL;
6289
6290         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6291                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6292                                     &info);
6293         if (err)
6294                 return err;
6295
6296         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6297                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6298                                     &info);
6299         if (err)
6300                 return err;
6301
6302         /* Now startup only the RX cpu. */
6303         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6304         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6305
6306         for (i = 0; i < 5; i++) {
6307                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6308                         break;
6309                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6310                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6311                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6312                 udelay(1000);
6313         }
6314         if (i >= 5) {
6315                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6316                        "to set RX CPU PC, is %08x should be %08x\n",
6317                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6318                        TG3_FW_TEXT_ADDR);
6319                 return -ENODEV;
6320         }
6321         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6322         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6323
6324         return 0;
6325 }
6326
6327
/* TSO offload firmware: release version (1.6.0) and link-time section
 * layout in the chip's internal address space.
 * NOTE(review): "RELASE" in the minor-version macro is a historical
 * misspelling; kept as-is since renaming could break other references.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
6342
/* TSO offload firmware .text section: machine code (appears to be MIPS)
 * derived from Broadcom's unpublished sources per the copyright notice
 * at the top of this file.  Opaque data -- do not hand-edit.
 */
static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
	0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
	0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
	0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
	0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
	0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
	0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
	0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
	0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
	0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
	0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
	0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
	0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
	0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
	0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
	0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
	0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
	0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
	0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
	0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
	0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
	0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
	0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
	0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
	0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
	0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
	0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
	0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
	0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
	0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
	0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
	0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
	0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
	0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
	0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
	0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
	0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
	0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
	0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
	0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
	0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
	0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
	0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
	0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
	0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
	0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
	0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
	0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
	0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
	0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
	0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
	0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
	0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
	0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
	0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
	0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
	0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
	0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
	0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
	0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
	0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
	0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
	0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
	0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
	0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
	0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
	0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
	0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
	0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
	0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
	0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
	0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
	0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
	0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
	0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
	0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
	0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
	0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
	0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
	0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
	0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
	0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
	0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
	0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
	0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
	0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
	0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
	0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
	0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
	0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
	0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
	0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
	0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
	0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
	0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
	0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
	0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
	0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
	0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
	0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
	0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
	0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
	0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
	0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
	0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
	0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
	0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
	0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
	0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
	0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
	0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
	0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
	0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
	0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
	0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
	0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
	0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
	0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
	0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
	0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
	0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
	0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
	0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
	0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
	0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
	0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
	0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
	0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
	0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
	0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
	0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
	0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
	0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
	0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
	0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
	0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
	0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
	0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
	0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
	0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
	0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
	0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
	0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
	0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
	0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
	0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
	0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
	0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
	0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
	0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
	0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
	0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
	0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
	0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
	0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
	0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
	0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
	0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
	0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
	0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
	0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
	0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
	0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
	0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
	0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
	0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
	0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
	0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
	0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
	0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
	0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
	0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
	0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
	0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
	0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
	0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
	0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
	0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
	0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
	0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
	0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
	0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
	0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
	0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
	0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
	0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
	0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
	0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
	0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
	0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
	0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
	0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
	0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
	0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
	0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
	0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
	0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
	0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
	0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
	0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
	0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
	0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
	0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
	0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
	0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
	0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
	0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
	0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
	0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
	0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
	0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
	0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
	0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
	0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
	0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
	0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
	0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
	0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
	0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
	0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
	0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
	0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
	0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
	0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
	0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
	0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
	0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
	0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
	0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
	0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
	0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
	0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
	0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
	0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
	0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
	0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
	0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
	0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
	0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
	0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
	0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
	0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
	0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
	0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
	0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
	0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
	0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
	0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
	0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};
6629
/* TSO firmware read-only data segment, loaded at TG3_TSO_FW_RODATA_ADDR.
 * The words are ASCII diagnostic strings used by the firmware (e.g.
 * 0x4d61696e 0x43707542 = "MainCpuB", 0x66617461 0x6c457272 = "fatalErr").
 * Generated alongside the firmware text -- do not edit by hand.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
6637
/* TSO firmware initialized-data segment, loaded at TG3_TSO_FW_DATA_ADDR.
 * Contains the firmware's version string ("stkoffld_v1.6.0" in ASCII).
 * Generated alongside the firmware text -- do not edit by hand.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
6643
/* 5705 needs a special version of the TSO firmware.
 *
 * Memory layout of the image (all offsets within NIC SRAM):
 *   text   @ 0x00010000, len 0xe90
 *   rodata @ 0x00010e90, len 0x50
 *   data   @ 0x00010f00, len 0x20   (gap after rodata is alignment padding)
 *   sbss   @ 0x00010f20, len 0x28
 *   bss    @ 0x00010f50, len 0x88
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELEASE_MINOR       0x2
/* Historical misspelling of the minor-version macro; kept as an alias so
 * any existing references to the old name continue to compile.
 */
#define TG3_TSO5_FW_RELASE_MINOR        TG3_TSO5_FW_RELEASE_MINOR
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
6659
/* TSO firmware text segment for the 5705, loaded at TG3_TSO5_FW_TEXT_ADDR
 * by tg3_load_tso_firmware() and executed by the on-chip RX CPU.  The
 * words appear to be MIPS machine code (generated from the proprietary
 * firmware source noted in the file header) -- do not edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6818
/* TSO5 firmware read-only data segment, loaded at TG3_TSO5_FW_RODATA_ADDR.
 * ASCII diagnostic strings (e.g. 0x4d61696e 0x43707542 = "MainCpuB",
 * 0x66617461 0x6c457272 = "fatalErr").  Generated data -- do not edit.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6825
/* TSO5 firmware initialized-data segment, loaded at TG3_TSO5_FW_DATA_ADDR.
 * Contains the firmware's version string ("stkoffld_v1.2.0" in ASCII).
 * Generated data -- do not edit.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6830
/* tp->lock is held. */
/* Download the TSO (TCP segmentation offload) firmware into the chip
 * and start the on-chip CPU running it.
 *
 * On 5705 parts a special firmware build is loaded via the RX CPU into
 * the mbuf-pool region of NIC SRAM; on all other TSO-capable parts the
 * firmware runs on the TX CPU in its dedicated scratch memory.
 *
 * Returns 0 on success (including the no-op case of chips with TSO in
 * hardware), the error from tg3_load_firmware_cpu(), or -ENODEV if the
 * CPU program counter will not latch the firmware entry point.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* HW-TSO chips need no firmware download at all. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		/* Scratch must cover every segment, including the
		 * uninitialized sbss/bss areas the firmware expects.
		 */
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Verify the PC latched the entry point; retry a few times,
	 * re-halting the CPU and rewriting the PC on each attempt.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear pending state and release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
6902
6903
6904 /* tp->lock is held. */
6905 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6906 {
6907         u32 addr_high, addr_low;
6908         int i;
6909
6910         addr_high = ((tp->dev->dev_addr[0] << 8) |
6911                      tp->dev->dev_addr[1]);
6912         addr_low = ((tp->dev->dev_addr[2] << 24) |
6913                     (tp->dev->dev_addr[3] << 16) |
6914                     (tp->dev->dev_addr[4] <<  8) |
6915                     (tp->dev->dev_addr[5] <<  0));
6916         for (i = 0; i < 4; i++) {
6917                 if (i == 1 && skip_mac_1)
6918                         continue;
6919                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6920                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6921         }
6922
6923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6924             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6925                 for (i = 0; i < 12; i++) {
6926                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6927                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6928                 }
6929         }
6930
6931         addr_high = (tp->dev->dev_addr[0] +
6932                      tp->dev->dev_addr[1] +
6933                      tp->dev->dev_addr[2] +
6934                      tp->dev->dev_addr[3] +
6935                      tp->dev->dev_addr[4] +
6936                      tp->dev->dev_addr[5]) &
6937                 TX_BACKOFF_SEED_MASK;
6938         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6939 }
6940
6941 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6942 {
6943         struct tg3 *tp = netdev_priv(dev);
6944         struct sockaddr *addr = p;
6945         int err = 0, skip_mac_1 = 0;
6946
6947         if (!is_valid_ether_addr(addr->sa_data))
6948                 return -EINVAL;
6949
6950         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6951
6952         if (!netif_running(dev))
6953                 return 0;
6954
6955         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6956                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6957
6958                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6959                 addr0_low = tr32(MAC_ADDR_0_LOW);
6960                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6961                 addr1_low = tr32(MAC_ADDR_1_LOW);
6962
6963                 /* Skip MAC addr 1 if ASF is using it. */
6964                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6965                     !(addr1_high == 0 && addr1_low == 0))
6966                         skip_mac_1 = 1;
6967         }
6968         spin_lock_bh(&tp->lock);
6969         __tg3_set_mac_addr(tp, skip_mac_1);
6970         spin_unlock_bh(&tp->lock);
6971
6972         return err;
6973 }
6974
6975 /* tp->lock is held. */
6976 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6977                            dma_addr_t mapping, u32 maxlen_flags,
6978                            u32 nic_addr)
6979 {
6980         tg3_write_mem(tp,
6981                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6982                       ((u64) mapping >> 32));
6983         tg3_write_mem(tp,
6984                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6985                       ((u64) mapping & 0xffffffff));
6986         tg3_write_mem(tp,
6987                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6988                        maxlen_flags);
6989
6990         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6991                 tg3_write_mem(tp,
6992                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6993                               nic_addr);
6994 }
6995
6996 static void __tg3_set_rx_mode(struct net_device *);
6997 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6998 {
6999         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7000         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7001         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7002         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7003         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7004                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7005                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7006         }
7007         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7008         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7009         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7010                 u32 val = ec->stats_block_coalesce_usecs;
7011
7012                 if (!netif_carrier_ok(tp->dev))
7013                         val = 0;
7014
7015                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7016         }
7017 }
7018
7019 /* tp->lock is held. */
7020 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7021 {
7022         u32 val, rdmac_mode;
7023         int i, err, limit;
7024
7025         tg3_disable_ints(tp);
7026
7027         tg3_stop_fw(tp);
7028
7029         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7030
7031         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7032                 tg3_abort_hw(tp, 1);
7033         }
7034
7035         if (reset_phy &&
7036             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7037                 tg3_phy_reset(tp);
7038
7039         err = tg3_chip_reset(tp);
7040         if (err)
7041                 return err;
7042
7043         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7044
7045         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
7046             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
7047                 val = tr32(TG3_CPMU_CTRL);
7048                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7049                 tw32(TG3_CPMU_CTRL, val);
7050
7051                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7052                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7053                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7054                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7055
7056                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7057                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7058                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7059                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7060
7061                 val = tr32(TG3_CPMU_HST_ACC);
7062                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7063                 val |= CPMU_HST_ACC_MACCLK_6_25;
7064                 tw32(TG3_CPMU_HST_ACC, val);
7065         }
7066
7067         /* This works around an issue with Athlon chipsets on
7068          * B3 tigon3 silicon.  This bit has no effect on any
7069          * other revision.  But do not set this on PCI Express
7070          * chips and don't even touch the clocks if the CPMU is present.
7071          */
7072         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7073                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7074                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7075                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7076         }
7077
7078         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7079             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7080                 val = tr32(TG3PCI_PCISTATE);
7081                 val |= PCISTATE_RETRY_SAME_DMA;
7082                 tw32(TG3PCI_PCISTATE, val);
7083         }
7084
7085         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7086                 /* Allow reads and writes to the
7087                  * APE register and memory space.
7088                  */
7089                 val = tr32(TG3PCI_PCISTATE);
7090                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7091                        PCISTATE_ALLOW_APE_SHMEM_WR;
7092                 tw32(TG3PCI_PCISTATE, val);
7093         }
7094
7095         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7096                 /* Enable some hw fixes.  */
7097                 val = tr32(TG3PCI_MSI_DATA);
7098                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7099                 tw32(TG3PCI_MSI_DATA, val);
7100         }
7101
7102         /* Descriptor ring init may make accesses to the
7103          * NIC SRAM area to setup the TX descriptors, so we
7104          * can only do this after the hardware has been
7105          * successfully reset.
7106          */
7107         err = tg3_init_rings(tp);
7108         if (err)
7109                 return err;
7110
7111         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7112             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7113             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7114                 /* This value is determined during the probe time DMA
7115                  * engine test, tg3_test_dma.
7116                  */
7117                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7118         }
7119
7120         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7121                           GRC_MODE_4X_NIC_SEND_RINGS |
7122                           GRC_MODE_NO_TX_PHDR_CSUM |
7123                           GRC_MODE_NO_RX_PHDR_CSUM);
7124         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7125
7126         /* Pseudo-header checksum is done by hardware logic and not
7127          * the offload processers, so make the chip do the pseudo-
7128          * header checksums on receive.  For transmit it is more
7129          * convenient to do the pseudo-header checksum in software
7130          * as Linux does that on transmit for us in all cases.
7131          */
7132         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7133
7134         tw32(GRC_MODE,
7135              tp->grc_mode |
7136              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7137
7138         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
7139         val = tr32(GRC_MISC_CFG);
7140         val &= ~0xff;
7141         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7142         tw32(GRC_MISC_CFG, val);
7143
7144         /* Initialize MBUF/DESC pool. */
7145         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7146                 /* Do nothing.  */
7147         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7148                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7149                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7150                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7151                 else
7152                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7153                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7154                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7155         }
7156         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7157                 int fw_len;
7158
7159                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7160                           TG3_TSO5_FW_RODATA_LEN +
7161                           TG3_TSO5_FW_DATA_LEN +
7162                           TG3_TSO5_FW_SBSS_LEN +
7163                           TG3_TSO5_FW_BSS_LEN);
7164                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7165                 tw32(BUFMGR_MB_POOL_ADDR,
7166                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7167                 tw32(BUFMGR_MB_POOL_SIZE,
7168                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7169         }
7170
7171         if (tp->dev->mtu <= ETH_DATA_LEN) {
7172                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7173                      tp->bufmgr_config.mbuf_read_dma_low_water);
7174                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7175                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7176                 tw32(BUFMGR_MB_HIGH_WATER,
7177                      tp->bufmgr_config.mbuf_high_water);
7178         } else {
7179                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7180                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7181                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7182                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7183                 tw32(BUFMGR_MB_HIGH_WATER,
7184                      tp->bufmgr_config.mbuf_high_water_jumbo);
7185         }
7186         tw32(BUFMGR_DMA_LOW_WATER,
7187              tp->bufmgr_config.dma_low_water);
7188         tw32(BUFMGR_DMA_HIGH_WATER,
7189              tp->bufmgr_config.dma_high_water);
7190
7191         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7192         for (i = 0; i < 2000; i++) {
7193                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7194                         break;
7195                 udelay(10);
7196         }
7197         if (i >= 2000) {
7198                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7199                        tp->dev->name);
7200                 return -ENODEV;
7201         }
7202
7203         /* Setup replenish threshold. */
7204         val = tp->rx_pending / 8;
7205         if (val == 0)
7206                 val = 1;
7207         else if (val > tp->rx_std_max_post)
7208                 val = tp->rx_std_max_post;
7209         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7210                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7211                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7212
7213                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7214                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7215         }
7216
7217         tw32(RCVBDI_STD_THRESH, val);
7218
7219         /* Initialize TG3_BDINFO's at:
7220          *  RCVDBDI_STD_BD:     standard eth size rx ring
7221          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7222          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7223          *
7224          * like so:
7225          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7226          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7227          *                              ring attribute flags
7228          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7229          *
7230          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7231          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7232          *
7233          * The size of each ring is fixed in the firmware, but the location is
7234          * configurable.
7235          */
7236         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7237              ((u64) tp->rx_std_mapping >> 32));
7238         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7239              ((u64) tp->rx_std_mapping & 0xffffffff));
7240         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7241              NIC_SRAM_RX_BUFFER_DESC);
7242
7243         /* Don't even try to program the JUMBO/MINI buffer descriptor
7244          * configs on 5705.
7245          */
7246         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7247                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7248                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7249         } else {
7250                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7251                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7252
7253                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7254                      BDINFO_FLAGS_DISABLED);
7255
7256                 /* Setup replenish threshold. */
7257                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7258
7259                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7260                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7261                              ((u64) tp->rx_jumbo_mapping >> 32));
7262                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7263                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7264                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7265                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7266                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7267                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7268                 } else {
7269                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7270                              BDINFO_FLAGS_DISABLED);
7271                 }
7272
7273         }
7274
7275         /* There is only one send ring on 5705/5750, no need to explicitly
7276          * disable the others.
7277          */
7278         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7279                 /* Clear out send RCB ring in SRAM. */
7280                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7281                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7282                                       BDINFO_FLAGS_DISABLED);
7283         }
7284
7285         tp->tx_prod = 0;
7286         tp->tx_cons = 0;
7287         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7288         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7289
7290         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7291                        tp->tx_desc_mapping,
7292                        (TG3_TX_RING_SIZE <<
7293                         BDINFO_FLAGS_MAXLEN_SHIFT),
7294                        NIC_SRAM_TX_BUFFER_DESC);
7295
7296         /* There is only one receive return ring on 5705/5750, no need
7297          * to explicitly disable the others.
7298          */
7299         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7300                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7301                      i += TG3_BDINFO_SIZE) {
7302                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7303                                       BDINFO_FLAGS_DISABLED);
7304                 }
7305         }
7306
7307         tp->rx_rcb_ptr = 0;
7308         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7309
7310         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7311                        tp->rx_rcb_mapping,
7312                        (TG3_RX_RCB_RING_SIZE(tp) <<
7313                         BDINFO_FLAGS_MAXLEN_SHIFT),
7314                        0);
7315
7316         tp->rx_std_ptr = tp->rx_pending;
7317         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7318                      tp->rx_std_ptr);
7319
7320         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7321                                                 tp->rx_jumbo_pending : 0;
7322         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7323                      tp->rx_jumbo_ptr);
7324
7325         /* Initialize MAC address and backoff seed. */
7326         __tg3_set_mac_addr(tp, 0);
7327
7328         /* MTU + ethernet header + FCS + optional VLAN tag */
7329         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7330
7331         /* The slot time is changed by tg3_setup_phy if we
7332          * run at gigabit with half duplex.
7333          */
7334         tw32(MAC_TX_LENGTHS,
7335              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7336              (6 << TX_LENGTHS_IPG_SHIFT) |
7337              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7338
7339         /* Receive rules. */
7340         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7341         tw32(RCVLPC_CONFIG, 0x0181);
7342
7343         /* Calculate RDMAC_MODE setting early, we need it to determine
7344          * the RCVLPC_STATE_ENABLE mask.
7345          */
7346         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7347                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7348                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7349                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7350                       RDMAC_MODE_LNGREAD_ENAB);
7351
7352         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7353             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7354                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7355                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7356                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7357
7358         /* If statement applies to 5705 and 5750 PCI devices only */
7359         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7360              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7361             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7362                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7363                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7364                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7365                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7366                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7367                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7368                 }
7369         }
7370
7371         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7372                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7373
7374         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7375                 rdmac_mode |= (1 << 27);
7376
7377         /* Receive/send statistics. */
7378         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7379                 val = tr32(RCVLPC_STATS_ENABLE);
7380                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7381                 tw32(RCVLPC_STATS_ENABLE, val);
7382         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7383                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7384                 val = tr32(RCVLPC_STATS_ENABLE);
7385                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7386                 tw32(RCVLPC_STATS_ENABLE, val);
7387         } else {
7388                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7389         }
7390         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7391         tw32(SNDDATAI_STATSENAB, 0xffffff);
7392         tw32(SNDDATAI_STATSCTRL,
7393              (SNDDATAI_SCTRL_ENABLE |
7394               SNDDATAI_SCTRL_FASTUPD));
7395
7396         /* Setup host coalescing engine. */
7397         tw32(HOSTCC_MODE, 0);
7398         for (i = 0; i < 2000; i++) {
7399                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7400                         break;
7401                 udelay(10);
7402         }
7403
7404         __tg3_set_coalesce(tp, &tp->coal);
7405
7406         /* set status block DMA address */
7407         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7408              ((u64) tp->status_mapping >> 32));
7409         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7410              ((u64) tp->status_mapping & 0xffffffff));
7411
7412         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7413                 /* Status/statistics block address.  See tg3_timer,
7414                  * the tg3_periodic_fetch_stats call there, and
7415                  * tg3_get_stats to see how this works for 5705/5750 chips.
7416                  */
7417                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7418                      ((u64) tp->stats_mapping >> 32));
7419                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7420                      ((u64) tp->stats_mapping & 0xffffffff));
7421                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7422                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7423         }
7424
7425         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7426
7427         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7428         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7429         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7430                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7431
7432         /* Clear statistics/status block in chip, and status block in ram. */
7433         for (i = NIC_SRAM_STATS_BLK;
7434              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7435              i += sizeof(u32)) {
7436                 tg3_write_mem(tp, i, 0);
7437                 udelay(40);
7438         }
7439         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7440
7441         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7442                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7443                 /* reset to prevent losing 1st rx packet intermittently */
7444                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7445                 udelay(10);
7446         }
7447
7448         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7449                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7450         else
7451                 tp->mac_mode = 0;
7452         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7453                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7454         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7455             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7456             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7457                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7458         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7459         udelay(40);
7460
7461         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7462          * If TG3_FLG2_IS_NIC is zero, we should read the
7463          * register to preserve the GPIO settings for LOMs. The GPIOs,
7464          * whether used as inputs or outputs, are set by boot code after
7465          * reset.
7466          */
7467         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7468                 u32 gpio_mask;
7469
7470                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7471                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7472                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7473
7474                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7475                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7476                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7477
7478                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7479                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7480
7481                 tp->grc_local_ctrl &= ~gpio_mask;
7482                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7483
7484                 /* GPIO1 must be driven high for eeprom write protect */
7485                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7486                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7487                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7488         }
7489         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7490         udelay(100);
7491
7492         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7493         tp->last_tag = 0;
7494
7495         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7496                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7497                 udelay(40);
7498         }
7499
7500         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7501                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7502                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7503                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7504                WDMAC_MODE_LNGREAD_ENAB);
7505
7506         /* If statement applies to 5705 and 5750 PCI devices only */
7507         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7508              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7509             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7510                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7511                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7512                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7513                         /* nothing */
7514                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7515                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7516                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7517                         val |= WDMAC_MODE_RX_ACCEL;
7518                 }
7519         }
7520
7521         /* Enable host coalescing bug fix */
7522         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7523             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7524             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7525             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7526             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7527                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7528
7529         tw32_f(WDMAC_MODE, val);
7530         udelay(40);
7531
7532         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7533                 u16 pcix_cmd;
7534
7535                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7536                                      &pcix_cmd);
7537                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7538                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7539                         pcix_cmd |= PCI_X_CMD_READ_2K;
7540                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7541                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7542                         pcix_cmd |= PCI_X_CMD_READ_2K;
7543                 }
7544                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7545                                       pcix_cmd);
7546         }
7547
7548         tw32_f(RDMAC_MODE, rdmac_mode);
7549         udelay(40);
7550
7551         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7552         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7553                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7554
7555         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7556                 tw32(SNDDATAC_MODE,
7557                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7558         else
7559                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7560
7561         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7562         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7563         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7564         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7565         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7566                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7567         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7568         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7569
7570         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7571                 err = tg3_load_5701_a0_firmware_fix(tp);
7572                 if (err)
7573                         return err;
7574         }
7575
7576         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7577                 err = tg3_load_tso_firmware(tp);
7578                 if (err)
7579                         return err;
7580         }
7581
7582         tp->tx_mode = TX_MODE_ENABLE;
7583         tw32_f(MAC_TX_MODE, tp->tx_mode);
7584         udelay(100);
7585
7586         tp->rx_mode = RX_MODE_ENABLE;
7587         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7588             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7589             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7590             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7591                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7592
7593         tw32_f(MAC_RX_MODE, tp->rx_mode);
7594         udelay(10);
7595
7596         tw32(MAC_LED_CTRL, tp->led_ctrl);
7597
7598         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7599         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7600                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7601                 udelay(10);
7602         }
7603         tw32_f(MAC_RX_MODE, tp->rx_mode);
7604         udelay(10);
7605
7606         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7607                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7608                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7609                         /* Set drive transmission level to 1.2V  */
7610                         /* only if the signal pre-emphasis bit is not set  */
7611                         val = tr32(MAC_SERDES_CFG);
7612                         val &= 0xfffff000;
7613                         val |= 0x880;
7614                         tw32(MAC_SERDES_CFG, val);
7615                 }
7616                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7617                         tw32(MAC_SERDES_CFG, 0x616000);
7618         }
7619
7620         /* Prevent chip from dropping frames when flow control
7621          * is enabled.
7622          */
7623         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7624
7625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7626             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7627                 /* Use hardware link auto-negotiation */
7628                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7629         }
7630
7631         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7632             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7633                 u32 tmp;
7634
7635                 tmp = tr32(SERDES_RX_CTRL);
7636                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7637                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7638                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7639                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7640         }
7641
7642         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7643                 if (tp->link_config.phy_is_low_power) {
7644                         tp->link_config.phy_is_low_power = 0;
7645                         tp->link_config.speed = tp->link_config.orig_speed;
7646                         tp->link_config.duplex = tp->link_config.orig_duplex;
7647                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7648                 }
7649
7650                 err = tg3_setup_phy(tp, 0);
7651                 if (err)
7652                         return err;
7653
7654                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7655                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7656                         u32 tmp;
7657
7658                         /* Clear CRC stats. */
7659                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7660                                 tg3_writephy(tp, MII_TG3_TEST1,
7661                                              tmp | MII_TG3_TEST1_CRC_EN);
7662                                 tg3_readphy(tp, 0x14, &tmp);
7663                         }
7664                 }
7665         }
7666
7667         __tg3_set_rx_mode(tp->dev);
7668
7669         /* Initialize receive rules. */
7670         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7671         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7672         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7673         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7674
7675         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7676             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7677                 limit = 8;
7678         else
7679                 limit = 16;
7680         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7681                 limit -= 4;
7682         switch (limit) {
7683         case 16:
7684                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7685         case 15:
7686                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7687         case 14:
7688                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7689         case 13:
7690                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7691         case 12:
7692                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7693         case 11:
7694                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7695         case 10:
7696                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7697         case 9:
7698                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7699         case 8:
7700                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7701         case 7:
7702                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7703         case 6:
7704                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7705         case 5:
7706                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7707         case 4:
7708                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7709         case 3:
7710                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7711         case 2:
7712         case 1:
7713
7714         default:
7715                 break;
7716         }
7717
7718         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7719                 /* Write our heartbeat update interval to APE. */
7720                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7721                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7722
7723         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7724
7725         return 0;
7726 }
7727
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
        /* Make sure the core clocks are switched on before we touch
         * any other chip registers.
         */
        tg3_switch_clocks(tp);

        /* Point the PCI memory window at offset 0 of NIC SRAM. */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* The bulk of the bring-up work happens in tg3_reset_hw();
         * reset_phy is passed through to control whether the PHY is
         * also reset.  Returns 0 or a negative errno.
         */
        return tg3_reset_hw(tp, reset_phy);
}
7739
/* Read a 32-bit hardware statistics register (REG) via tr32() and
 * accumulate it into the 64-bit counter PSTAT (a low/high pair).
 * Unsigned wraparound of ->low after the add signals a carry into
 * ->high.  do/while(0) keeps the multi-statement macro safe in
 * if/else bodies.  Note: REG and PSTAT are each evaluated more than
 * once conceptually (PSTAT twice/thrice) — callers pass simple
 * expressions only, as all call sites below do.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
7746
/* Fold the chip's 32-bit MAC and receive-list-placer statistics
 * registers into the driver's 64-bit shadow copies in tp->hw_stats.
 * Called from tg3_timer()'s once-per-second section on 5705-plus
 * chips (which lack an on-chip statistics block in host memory).
 * Bails out early when the carrier is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        /* No link: skip the register reads entirely. */
        if (!netif_carrier_ok(tp->dev))
                return;

        /* Transmit-side MAC statistics. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* Receive-side MAC statistics. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        /* Receive list placer (buffer descriptor) statistics. */
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7787
/* Periodic driver watchdog, rearmed every tp->timer_offset jiffies.
 * Under tp->lock it: (1) works around the race-prone non-tagged IRQ
 * status protocol by forcing an interrupt or a coalescing-now cycle,
 * and schedules a full reset if the write DMA engine is found
 * disabled; (2) once per second (timer_counter ticks) fetches stats
 * on 5705-plus and polls/repairs link state; (3) every asf_counter
 * ticks sends the ASF firmware heartbeat.  If an IRQ-sync operation
 * is in flight (tp->irq_sync), it skips all work and just rearms.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* Don't contend with tg3_irq_quiesce(); try again next tick. */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block updated but possibly no IRQ seen:
                         * force an interrupt so it gets serviced.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Kick the coalescing engine to refresh the
                         * status block now.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                /* Write DMA engine no longer enabled — the chip is in a
                 * bad state; hand off a full reset to the workqueue
                 * (can't reset from timer/softirq context).
                 */
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        /* Poll MAC_STATUS for link/PHY change events
                         * instead of relying on link-change interrupts.
                         */
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Link up but state changed, or link down with
                         * PCS sync/signal present: renegotiate.
                         */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Bounce the MAC port mode to
                                         * reset the SERDES link state.
                                         */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
                    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
                        tg3_wait_for_event_ack(tp);

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

                        tg3_generate_fw_event(tp);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        /* Rearm ourselves for the next tick. */
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
7907
7908 static int tg3_request_irq(struct tg3 *tp)
7909 {
7910         irq_handler_t fn;
7911         unsigned long flags;
7912         struct net_device *dev = tp->dev;
7913
7914         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7915                 fn = tg3_msi;
7916                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7917                         fn = tg3_msi_1shot;
7918                 flags = IRQF_SAMPLE_RANDOM;
7919         } else {
7920                 fn = tg3_interrupt;
7921                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7922                         fn = tg3_interrupt_tagged;
7923                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7924         }
7925         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7926 }
7927
/* Check that the chip can actually deliver an interrupt to the host.
 * The normal handler is swapped out for tg3_test_isr, an immediate
 * host-coalescing event is forced, and the interrupt mailbox is
 * polled for up to ~50ms.  The regular handler is reinstalled before
 * returning.  Returns 0 if an interrupt was observed, -EIO if not,
 * -ENODEV if the device is down, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        free_irq(tp->pdev->irq, dev);

        err = request_irq(tp->pdev->irq, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
        if (err)
                return err;

        tp->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Force a coalescing event now to trigger an interrupt. */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               HOSTCC_MODE_NOW);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
                                        TG3_64BIT_REG_LOW);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                /* A non-zero interrupt mailbox, or the PCI interrupt
                 * being masked (presumably done by the test ISR --
                 * its body is outside this view), means the interrupt
                 * reached the host.
                 */
                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                msleep(10);
        }

        tg3_disable_ints(tp);

        free_irq(tp->pdev->irq, dev);

        /* Reinstall the normal production interrupt handler. */
        err = tg3_request_irq(tp);

        if (err)
                return err;

        if (intr_ok)
                return 0;

        return -EIO;
}
7981
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other error from the interrupt test or
 * from restoring INTx is propagated to the caller.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test when MSI is not in use. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the original PCI command word. */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        /* Re-request the IRQ; with USING_MSI cleared this installs
         * the INTx handler variant.
         */
        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
8042
/* net_device ->open() hook: power the chip to D0, allocate the DMA
 * rings and status/stats blocks, optionally enable MSI, bring the
 * hardware up, start the service timer, and enable interrupts.
 * Each failure path unwinds exactly what was set up before it.
 * Returns 0 on success or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        netif_carrier_off(tp->dev);

        err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                return err;

        tg3_full_lock(tp, 0);

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        /* Enable MSI mode in the chip as well. */
                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                /* Unwind: MSI (if enabled) and the DMA memory. */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        napi_enable(&tp->napi);

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* Service timer runs at 1Hz with tagged status,
                 * 10Hz otherwise.
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                /* ASF heartbeat fires every 2 seconds (see the
                 * heartbeat comment in tg3_timer).
                 */
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                /* Unwind: NAPI, IRQ, MSI, DMA memory. */
                napi_disable(&tp->napi);
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                err = tg3_test_msi(tp);

                if (err) {
                        /* MSI test failed AND INTx fallback failed:
                         * tear everything down.
                         */
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        napi_disable(&tp->napi);

                        return err;
                }

                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                u32 val = tr32(PCIE_TRANSACTION_CFG);

                                tw32(PCIE_TRANSACTION_CFG,
                                     val | PCIE_TRANS_CFG_1SHOT_MSI);
                        }
                }
        }

        tg3_phy_start(tp);

        tg3_full_lock(tp, 0);

        /* Hardware is up: arm the service timer and open the gates. */
        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
8178
#if 0
/* Debug-only state dump, compiled out by default.  Dumps the PCI
 * status word, every major block's MODE/STATUS registers, the BD
 * control blocks, mailboxes, and the NIC-SRAM descriptor rings and
 * host status/statistics blocks to the console via printk.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;
        int i;

        pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
        printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
               val16, val32);

        /* MAC block */
        printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
               tr32(MAC_MODE), tr32(MAC_STATUS));
        printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
               tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
        printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
               tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
        printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

        /* Send data initiator control block */
        printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
               tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
        printk("       SNDDATAI_STATSCTRL[%08x]\n",
               tr32(SNDDATAI_STATSCTRL));

        /* Send data completion control block */
        printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

        /* Send BD ring selector block */
        printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
               tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

        /* Send BD initiator control block */
        printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
               tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

        /* Send BD completion control block */
        printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

        /* Receive list placement control block */
        printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
               tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
        printk("       RCVLPC_STATSCTRL[%08x]\n",
               tr32(RCVLPC_STATSCTRL));

        /* Receive data and receive BD initiator control block */
        printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
               tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

        /* Receive data completion control block */
        printk("DEBUG: RCVDCC_MODE[%08x]\n",
               tr32(RCVDCC_MODE));

        /* Receive BD initiator control block */
        printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
               tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

        /* Receive BD completion control block */
        printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
               tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

        /* Receive list selector control block */
        printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
               tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

        /* Mbuf cluster free block */
        printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
               tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

        /* Host coalescing control block */
        printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
               tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
        printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATS_BLK_NIC_ADDR));
        printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

        /* Memory arbiter control block */
        printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
               tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

        /* Buffer manager control block */
        printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
               tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
        printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
        printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
               "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_DMA_DESC_POOL_ADDR),
               tr32(BUFMGR_DMA_DESC_POOL_SIZE));

        /* Read DMA control block */
        printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
               tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

        /* Write DMA control block */
        printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
               tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

        /* DMA completion block */
        printk("DEBUG: DMAC_MODE[%08x]\n",
               tr32(DMAC_MODE));

        /* GRC block */
        printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
               tr32(GRC_MODE), tr32(GRC_MISC_CFG));
        printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
               tr32(GRC_LOCAL_CTRL));

        /* TG3_BDINFOs */
        printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_JUMBO_BD + 0x0),
               tr32(RCVDBDI_JUMBO_BD + 0x4),
               tr32(RCVDBDI_JUMBO_BD + 0x8),
               tr32(RCVDBDI_JUMBO_BD + 0xc));
        printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_STD_BD + 0x0),
               tr32(RCVDBDI_STD_BD + 0x4),
               tr32(RCVDBDI_STD_BD + 0x8),
               tr32(RCVDBDI_STD_BD + 0xc));
        printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_MINI_BD + 0x0),
               tr32(RCVDBDI_MINI_BD + 0x4),
               tr32(RCVDBDI_MINI_BD + 0x8),
               tr32(RCVDBDI_MINI_BD + 0xc));

        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
        printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4, val32_5);

        /* SW status block */
        printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
               tp->hw_status->status,
               tp->hw_status->status_tag,
               tp->hw_status->rx_jumbo_consumer,
               tp->hw_status->rx_consumer,
               tp->hw_status->rx_mini_consumer,
               tp->hw_status->idx[0].rx_producer,
               tp->hw_status->idx[0].tx_consumer);

        /* SW statistics block */
        printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
               ((u32 *)tp->hw_stats)[0],
               ((u32 *)tp->hw_stats)[1],
               ((u32 *)tp->hw_stats)[2],
               ((u32 *)tp->hw_stats)[3]);

        /* Mailboxes */
        printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

        /* NIC side send descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long txd;

                txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
                        + (i * sizeof(struct tg3_tx_buffer_desc));
                printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(txd + 0x0), readl(txd + 0x4),
                       readl(txd + 0x8), readl(txd + 0xc));
        }

        /* NIC side RX descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }

        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }
}
#endif
8406
8407 static struct net_device_stats *tg3_get_stats(struct net_device *);
8408 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8409
/* net_device ->stop() hook: quiesce NAPI, pending reset work and the
 * service timer, halt the hardware, release the IRQ (and MSI) and
 * the DMA memory, then drop the chip to D3hot.  Stats are
 * snapshotted into *_prev before the hw stats block is freed so
 * cumulative counters survive an ifdown/ifup cycle.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        napi_disable(&tp->napi);
        cancel_work_sync(&tp->reset_task);

        netif_stop_queue(dev);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
#if 0
        tg3_dump_state(tp);
#endif

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Snapshot totals while hw_stats is still allocated. */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
8453
8454 static inline unsigned long get_stat64(tg3_stat64_t *val)
8455 {
8456         unsigned long ret;
8457
8458 #if (BITS_PER_LONG == 32)
8459         ret = val->low;
8460 #else
8461         ret = ((u64)val->high << 32) | ((u64)val->low);
8462 #endif
8463         return ret;
8464 }
8465
8466 static inline u64 get_estat64(tg3_stat64_t *val)
8467 {
8468        return ((u64)val->high << 32) | ((u64)val->low);
8469 }
8470
/* Return the cumulative rx CRC error count.  5700/5701 copper chips
 * do not report this in the hw stats block, so it is read from a PHY
 * test register and accumulated in software; all other chips use the
 * rx_fcs_errors hardware counter.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                /* Serialize PHY access with the rest of the driver. */
                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        /* Enable the PHY CRC counter, then fetch it
                         * from register 0x14 (presumably a
                         * clear-on-read counter -- confirm against
                         * the 5700 PHY documentation).
                         */
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
8496
/* Accumulate one 64-bit hw counter into the ethtool stats:
 * running total = snapshot saved at last close + current hw value.
 * Expects estats, old_estats and hw_stats in the expansion scope.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_estat64(&hw_stats->member)
8500
/* Build the ethtool statistics struct by folding every live hardware
 * counter into the totals saved at the last close (estats_prev).
 * If the hw stats block is gone (device closed), the saved snapshot
 * is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        return estats;
}
8588
/* net_device ->get_stats() hook: map the hardware counters onto the
 * generic net_device_stats fields, adding them to the totals saved
 * at the last close (net_stats_prev).  If the hw stats block is gone
 * (device closed), the saved snapshot is returned unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        struct net_device_stats *stats = &tp->net_stats;
        struct net_device_stats *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        /* Packet totals are the sum of the ucast/mcast/bcast counters. */
        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY on 5700/5701 (see
         * calc_crc_errors).
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        return stats;
}
8648
8649 static inline u32 calc_crc(unsigned char *buf, int len)
8650 {
8651         u32 reg;
8652         u32 tmp;
8653         int j, k;
8654
8655         reg = 0xffffffff;
8656
8657         for (j = 0; j < len; j++) {
8658                 reg ^= buf[j];
8659
8660                 for (k = 0; k < 8; k++) {
8661                         tmp = reg & 0x01;
8662
8663                         reg >>= 1;
8664
8665                         if (tmp) {
8666                                 reg ^= 0xedb88320;
8667                         }
8668                 }
8669         }
8670
8671         return ~reg;
8672 }
8673
8674 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8675 {
8676         /* accept or reject all multicast frames */
8677         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8678         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8679         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8680         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8681 }
8682
/* Install the RX filtering configuration (promiscuous flag, multicast
 * hash, VLAN tag keeping) into the MAC.  Caller must hold the driver
 * locks (see tg3_set_rx_mode for the locked wrapper).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash bucket is the low 7 bits of the inverted
			 * CRC-32 of the address: bits 6:5 pick one of the
			 * four 32-bit registers, bits 4:0 the bit inside it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch (and flush) the RX mode register if something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		/* Let the MAC latch the new mode before returning. */
		udelay(10);
	}
}
8746
/* net_device set_rx_mode hook: apply RX filtering under the full driver
 * lock.  A no-op while the interface is down; the config is reapplied
 * on the next open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8758
8759 #define TG3_REGDUMP_LEN         (32 * 1024)
8760
8761 static int tg3_get_regs_len(struct net_device *dev)
8762 {
8763         return TG3_REGDUMP_LEN;
8764 }
8765
/* ethtool get_regs: dump selected device register groups into the
 * caller-supplied TG3_REGDUMP_LEN buffer.  Unread gaps stay zero from
 * the initial memset.  Skipped while the PHY is in low-power state,
 * when register reads would not be meaningful.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* The helpers reposition p so each register value lands at its own
 * register offset inside the dump buffer, mirroring the device map.
 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only dumped when the part has NVRAM attached. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8838
8839 static int tg3_get_eeprom_len(struct net_device *dev)
8840 {
8841         struct tg3 *tp = netdev_priv(dev);
8842
8843         return tp->nvram_size;
8844 }
8845
8846 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8847 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8848 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8849
/* ethtool get_eeprom: copy an arbitrary byte range out of NVRAM.
 * NVRAM is only readable in aligned 32-bit words, so the request is
 * split into an unaligned head, an aligned middle, and an unaligned
 * tail.  eeprom->len is updated incrementally so that on a partial
 * failure the caller knows how many bytes are valid.
 * Returns 0 or a negative errno from the NVRAM read.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	/* NVRAM is not accessible while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the enclosing word and copy out just the wanted bytes. */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report how much was successfully copied so far. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8909
8910 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8911
/* ethtool set_eeprom: write an arbitrary byte range into NVRAM.
 * NVRAM writes must be whole aligned 32-bit words, so an unaligned
 * head or tail is handled by reading the existing boundary words and
 * merging the caller's bytes into a temporary buffer (read-modify-write).
 * Returns 0 or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	/* NVRAM is not accessible while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	/* Guard against writes from tools that didn't read the magic first. */
	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Build a word-aligned image: preserved boundary words plus
		 * the caller's payload in the middle.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8969
8970 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8971 {
8972         struct tg3 *tp = netdev_priv(dev);
8973
8974         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8975                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8976                         return -EAGAIN;
8977                 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8978         }
8979
8980         cmd->supported = (SUPPORTED_Autoneg);
8981
8982         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8983                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8984                                    SUPPORTED_1000baseT_Full);
8985
8986         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8987                 cmd->supported |= (SUPPORTED_100baseT_Half |
8988                                   SUPPORTED_100baseT_Full |
8989                                   SUPPORTED_10baseT_Half |
8990                                   SUPPORTED_10baseT_Full |
8991                                   SUPPORTED_TP);
8992                 cmd->port = PORT_TP;
8993         } else {
8994                 cmd->supported |= SUPPORTED_FIBRE;
8995                 cmd->port = PORT_FIBRE;
8996         }
8997
8998         cmd->advertising = tp->link_config.advertising;
8999         if (netif_running(dev)) {
9000                 cmd->speed = tp->link_config.active_speed;
9001                 cmd->duplex = tp->link_config.active_duplex;
9002         }
9003         cmd->phy_address = PHY_ADDR;
9004         cmd->transceiver = 0;
9005         cmd->autoneg = tp->link_config.autoneg;
9006         cmd->maxtxpkt = 0;
9007         cmd->maxrxpkt = 0;
9008         return 0;
9009 }
9010
/* ethtool set_settings: validate and apply a link configuration.
 * Serdes (fiber) parts may only autoneg 1000Mb or be forced to 1000Mb;
 * copper parts must not be *forced* to 1000Mb (autoneg only), and
 * 10/100-only parts reject 1000Mb entirely.  Defers to phylib when an
 * external PHY drives the device.
 * Returns 0, -EINVAL on an illegal combination, or -EAGAIN if the
 * phylib PHY is not connected yet.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Speed/duplex are negotiated, not forced. */
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested config so it can be restored after
	 * power-state transitions.
	 */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
9066
9067 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9068 {
9069         struct tg3 *tp = netdev_priv(dev);
9070
9071         strcpy(info->driver, DRV_MODULE_NAME);
9072         strcpy(info->version, DRV_MODULE_VERSION);
9073         strcpy(info->fw_version, tp->fw_ver);
9074         strcpy(info->bus_info, pci_name(tp->pdev));
9075 }
9076
9077 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9078 {
9079         struct tg3 *tp = netdev_priv(dev);
9080
9081         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9082             device_can_wakeup(&tp->pdev->dev))
9083                 wol->supported = WAKE_MAGIC;
9084         else
9085                 wol->supported = 0;
9086         wol->wolopts = 0;
9087         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9088                 wol->wolopts = WAKE_MAGIC;
9089         memset(&wol->sopass, 0, sizeof(wol->sopass));
9090 }
9091
9092 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9093 {
9094         struct tg3 *tp = netdev_priv(dev);
9095         struct device *dp = &tp->pdev->dev;
9096
9097         if (wol->wolopts & ~WAKE_MAGIC)
9098                 return -EINVAL;
9099         if ((wol->wolopts & WAKE_MAGIC) &&
9100             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9101                 return -EINVAL;
9102
9103         spin_lock_bh(&tp->lock);
9104         if (wol->wolopts & WAKE_MAGIC) {
9105                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9106                 device_set_wakeup_enable(dp, true);
9107         } else {
9108                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9109                 device_set_wakeup_enable(dp, false);
9110         }
9111         spin_unlock_bh(&tp->lock);
9112
9113         return 0;
9114 }
9115
9116 static u32 tg3_get_msglevel(struct net_device *dev)
9117 {
9118         struct tg3 *tp = netdev_priv(dev);
9119         return tp->msg_enable;
9120 }
9121
9122 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9123 {
9124         struct tg3 *tp = netdev_priv(dev);
9125         tp->msg_enable = value;
9126 }
9127
/* ethtool set_tso: enable or disable TCP segmentation offload, also
 * toggling the IPv6 TSO (and, on some chips, TSO-ECN) feature bits on
 * hardware generations that support them.
 * Returns -EINVAL when enabling is requested on an incapable part.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		/* Hardware cannot do TSO; only "off" is acceptable. */
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			/* TSO-ECN only on 5761, 5785 and non-AX 5784 parts. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
9151
/* ethtool nway_reset: restart autonegotiation.  Only meaningful for
 * copper links (plain serdes has no MII autoneg to restart); defers to
 * phylib when an external PHY drives the device.
 * Returns 0 on success, -EAGAIN if the device is down or the phylib PHY
 * is not connected, -EINVAL if autoneg is not active.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is deliberately read twice; the first
		 * read's result is discarded — presumably to flush a latched
		 * value before testing.  TODO confirm against PHY errata.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
9185
9186 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9187 {
9188         struct tg3 *tp = netdev_priv(dev);
9189
9190         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9191         ering->rx_mini_max_pending = 0;
9192         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9193                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9194         else
9195                 ering->rx_jumbo_max_pending = 0;
9196
9197         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9198
9199         ering->rx_pending = tp->rx_pending;
9200         ering->rx_mini_pending = 0;
9201         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9202                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9203         else
9204                 ering->rx_jumbo_pending = 0;
9205
9206         ering->tx_pending = tp->tx_pending;
9207 }
9208
/* ethtool set_ringparam: resize the RX/TX rings.  If the interface is
 * up, the chip is halted, the new sizes are latched and the hardware is
 * restarted.  TX rings must leave headroom for a maximally-fragmented
 * skb (and 3x that on chips with the TSO bug workaround).
 * Returns 0, -EINVAL for out-of-range sizes, or a restart error.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		/* Quiesce the PHY and datapath (with IRQ sync) before
		 * touching the ring configuration.
		 */
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY only after a successful hardware restart. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
9252
9253 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9254 {
9255         struct tg3 *tp = netdev_priv(dev);
9256
9257         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9258
9259         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9260                 epause->rx_pause = 1;
9261         else
9262                 epause->rx_pause = 0;
9263
9264         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9265                 epause->tx_pause = 1;
9266         else
9267                 epause->tx_pause = 0;
9268 }
9269
/* ethtool set_pauseparam: configure RX/TX flow control.  Two paths:
 *  - phylib-driven devices translate the request into pause
 *    advertisement bits and (re)start autoneg, or program flow control
 *    directly when autoneg is off;
 *  - legacy devices stop the datapath, update the flags and restart the
 *    hardware so the new settings take effect.
 * Returns 0, -EAGAIN if the phylib PHY is not connected, or a restart
 * error.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			/* Map the rx/tx pause request onto the standard
			 * Pause/Asym_Pause advertisement encoding.
			 */
			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				/* Only renegotiate when the pause bits
				 * actually change.
				 */
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			if (epause->rx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

		/* Full restart so the MAC picks up the new flow control. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
9362
9363 static u32 tg3_get_rx_csum(struct net_device *dev)
9364 {
9365         struct tg3 *tp = netdev_priv(dev);
9366         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9367 }
9368
9369 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9370 {
9371         struct tg3 *tp = netdev_priv(dev);
9372
9373         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9374                 if (data != 0)
9375                         return -EINVAL;
9376                 return 0;
9377         }
9378
9379         spin_lock_bh(&tp->lock);
9380         if (data)
9381                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9382         else
9383                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9384         spin_unlock_bh(&tp->lock);
9385
9386         return 0;
9387 }
9388
9389 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9390 {
9391         struct tg3 *tp = netdev_priv(dev);
9392
9393         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9394                 if (data != 0)
9395                         return -EINVAL;
9396                 return 0;
9397         }
9398
9399         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9400             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9401             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9402             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9403             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9404                 ethtool_op_set_tx_ipv6_csum(dev, data);
9405         else
9406                 ethtool_op_set_tx_csum(dev, data);
9407
9408         return 0;
9409 }
9410
9411 static int tg3_get_sset_count (struct net_device *dev, int sset)
9412 {
9413         switch (sset) {
9414         case ETH_SS_TEST:
9415                 return TG3_NUM_TEST;
9416         case ETH_SS_STATS:
9417                 return TG3_NUM_STATS;
9418         default:
9419                 return -EOPNOTSUPP;
9420         }
9421 }
9422
9423 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9424 {
9425         switch (stringset) {
9426         case ETH_SS_STATS:
9427                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9428                 break;
9429         case ETH_SS_TEST:
9430                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9431                 break;
9432         default:
9433                 WARN_ON(1);     /* we need a WARN() */
9434                 break;
9435         }
9436 }
9437
9438 static int tg3_phys_id(struct net_device *dev, u32 data)
9439 {
9440         struct tg3 *tp = netdev_priv(dev);
9441         int i;
9442
9443         if (!netif_running(tp->dev))
9444                 return -EAGAIN;
9445
9446         if (data == 0)
9447                 data = UINT_MAX / 2;
9448
9449         for (i = 0; i < (data * 2); i++) {
9450                 if ((i % 2) == 0)
9451                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9452                                            LED_CTRL_1000MBPS_ON |
9453                                            LED_CTRL_100MBPS_ON |
9454                                            LED_CTRL_10MBPS_ON |
9455                                            LED_CTRL_TRAFFIC_OVERRIDE |
9456                                            LED_CTRL_TRAFFIC_BLINK |
9457                                            LED_CTRL_TRAFFIC_LED);
9458
9459                 else
9460                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9461                                            LED_CTRL_TRAFFIC_OVERRIDE);
9462
9463                 if (msleep_interruptible(500))
9464                         break;
9465         }
9466         tw32(MAC_LED_CTRL, tp->led_ctrl);
9467         return 0;
9468 }
9469
9470 static void tg3_get_ethtool_stats (struct net_device *dev,
9471                                    struct ethtool_stats *estats, u64 *tmp_stats)
9472 {
9473         struct tg3 *tp = netdev_priv(dev);
9474         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9475 }
9476
9477 #define NVRAM_TEST_SIZE 0x100
9478 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9479 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9480 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9481 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9482 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9483
/* ethtool NVRAM self-test.
 *
 * Reads the beginning of the NVRAM image into a temporary buffer and
 * verifies its integrity.  Three layouts are recognized from the magic
 * word at offset 0:
 *   - TG3_EEPROM_MAGIC:        legacy image, checked with two CRCs
 *     (bootstrap block at 0x10, manufacturing block at 0xfc);
 *   - TG3_EEPROM_MAGIC_FW:     selfboot firmware image, checked with a
 *     simple byte-sum that must come out to zero;
 *   - TG3_EEPROM_MAGIC_HW:     selfboot hardware image, checked with
 *     per-byte even-parity bits embedded in the image.
 *
 * Returns 0 on success (or for selfboot revisions we don't know how to
 * check), -EIO on a read failure or checksum/parity mismatch, -ENOMEM if
 * the temporary buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic;
        __le32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return -EIO;

        /* Pick how many bytes to read and verify based on the image type. */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        /* Selfboot format 1: size depends on the revision. */
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        default:
                                /* Unknown revision - nothing to check. */
                                return 0;
                        }
                } else
                        /* Unknown selfboot format - nothing to check. */
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Read the image one 32-bit word at a time. */
        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = swab32(le32_to_cpu(buf[0]));
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                /* The byte-wise sum of the whole image (including the
                 * stored checksum byte) must be zero.
                 */
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  The bytes
                 * at offsets 0, 8, 16 and 17 hold packed parity bits for
                 * the remaining data bytes; note the loop advances i past
                 * those parity bytes inline.
                 */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                /* 7 parity bits packed in this byte. */
                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        else if (i == 16) {
                                int l;
                                u8 msk;

                                /* 6 parity bits here ... */
                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                /* ... and 8 more in the next byte. */
                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                /* Each data byte plus its parity bit must have even
                 * parity overall: a byte with an odd popcount must have
                 * its parity bit set, and vice versa.
                 */
                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if(csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                 goto out;

        err = 0;

out:
        kfree(buf);
        return err;
}
9617
9618 #define TG3_SERDES_TIMEOUT_SEC  2
9619 #define TG3_COPPER_TIMEOUT_SEC  6
9620
9621 static int tg3_test_link(struct tg3 *tp)
9622 {
9623         int i, max;
9624
9625         if (!netif_running(tp->dev))
9626                 return -ENODEV;
9627
9628         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9629                 max = TG3_SERDES_TIMEOUT_SEC;
9630         else
9631                 max = TG3_COPPER_TIMEOUT_SEC;
9632
9633         for (i = 0; i < max; i++) {
9634                 if (netif_carrier_ok(tp->dev))
9635                         return 0;
9636
9637                 if (msleep_interruptible(1000))
9638                         break;
9639         }
9640
9641         return -EIO;
9642 }
9643
/* Only test the commonly used registers.
 *
 * For each table entry: save the register, write 0 and verify that the
 * read-only bits (read_mask) are unchanged and the read/write bits
 * (write_mask) read back as 0; then write (read_mask | write_mask) and
 * verify the read-only bits are still unchanged and the read/write bits
 * read back as 1.  The saved value is restored afterwards (also on
 * failure).  Entries are filtered by chip family via the TG3_FL_* flags.
 *
 * Returns 0 on success, -EIO at the first mismatching register.
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;      /* which chip families the entry applies to */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;  /* read-only bits expected to be preserved */
                u32 write_mask; /* read/write bits expected to latch writes */
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                /* Sentinel terminating the table. */
                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                is_5705 = 1;
                if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that don't apply to this chip family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                tw32(offset, save_val);
        }

        return 0;

out:
        /* Restore the register before reporting which offset failed. */
        if (netif_msg_hw(tp))
                printk(KERN_ERR PFX "Register test failed at offset %x\n",
                       offset);
        tw32(offset, save_val);
        return -EIO;
}
9864
9865 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9866 {
9867         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9868         int i;
9869         u32 j;
9870
9871         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9872                 for (j = 0; j < len; j += 4) {
9873                         u32 val;
9874
9875                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9876                         tg3_read_mem(tp, offset + j, &val);
9877                         if (val != test_pattern[i])
9878                                 return -EIO;
9879                 }
9880         }
9881         return 0;
9882 }
9883
9884 static int tg3_test_memory(struct tg3 *tp)
9885 {
9886         static struct mem_entry {
9887                 u32 offset;
9888                 u32 len;
9889         } mem_tbl_570x[] = {
9890                 { 0x00000000, 0x00b50},
9891                 { 0x00002000, 0x1c000},
9892                 { 0xffffffff, 0x00000}
9893         }, mem_tbl_5705[] = {
9894                 { 0x00000100, 0x0000c},
9895                 { 0x00000200, 0x00008},
9896                 { 0x00004000, 0x00800},
9897                 { 0x00006000, 0x01000},
9898                 { 0x00008000, 0x02000},
9899                 { 0x00010000, 0x0e000},
9900                 { 0xffffffff, 0x00000}
9901         }, mem_tbl_5755[] = {
9902                 { 0x00000200, 0x00008},
9903                 { 0x00004000, 0x00800},
9904                 { 0x00006000, 0x00800},
9905                 { 0x00008000, 0x02000},
9906                 { 0x00010000, 0x0c000},
9907                 { 0xffffffff, 0x00000}
9908         }, mem_tbl_5906[] = {
9909                 { 0x00000200, 0x00008},
9910                 { 0x00004000, 0x00400},
9911                 { 0x00006000, 0x00400},
9912                 { 0x00008000, 0x01000},
9913                 { 0x00010000, 0x01000},
9914                 { 0xffffffff, 0x00000}
9915         };
9916         struct mem_entry *mem_tbl;
9917         int err = 0;
9918         int i;
9919
9920         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9921                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9922                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9923                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9924                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9925                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9926                         mem_tbl = mem_tbl_5755;
9927                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9928                         mem_tbl = mem_tbl_5906;
9929                 else
9930                         mem_tbl = mem_tbl_5705;
9931         } else
9932                 mem_tbl = mem_tbl_570x;
9933
9934         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9935                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9936                     mem_tbl[i].len)) != 0)
9937                         break;
9938         }
9939
9940         return err;
9941 }
9942
9943 #define TG3_MAC_LOOPBACK        0
9944 #define TG3_PHY_LOOPBACK        1
9945
/* Transmit one self-addressed test frame and verify it arrives back
 * intact through the selected loopback path.
 *
 * @loopback_mode: TG3_MAC_LOOPBACK for internal MAC loopback, or
 *                 TG3_PHY_LOOPBACK for BMCR-level loopback at the PHY.
 *
 * Returns 0 when the frame comes back with the expected length and
 * payload, -EINVAL for an unknown mode, -ENOMEM if the skb allocation
 * fails, -EIO for any transmit/receive/compare failure.  The received
 * skb is left on the ring; tg3_free_rings() cleans it up later.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
        u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 desc_idx;
        struct sk_buff *skb, *rx_skb;
        u8 *tx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;

        if (loopback_mode == TG3_MAC_LOOPBACK) {
                /* HW errata - mac loopback fails in some cases on 5780.
                 * Normal traffic and PHY loopback are not affected by
                 * errata.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
                        return 0;

                mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
                           MAC_MODE_PORT_INT_LPBACK;
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        mac_mode |= MAC_MODE_LINK_POLARITY;
                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                tw32(MAC_MODE, mac_mode);
        } else if (loopback_mode == TG3_PHY_LOOPBACK) {
                u32 val;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        u32 phytest;

                        /* Via the shadow test register, clear bit 0x20 of
                         * PHY register 0x1b.
                         * NOTE(review): presumably disables a power-down
                         * feature on the 5906 EPHY - confirm against the
                         * PHY datasheet.
                         */
                        if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
                                u32 phy;

                                tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                             phytest | MII_TG3_EPHY_SHADOW_EN);
                                if (!tg3_readphy(tp, 0x1b, &phy))
                                        tg3_writephy(tp, 0x1b, phy & ~0x20);
                                tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
                        }
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
                } else
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

                tg3_phy_toggle_automdix(tp, 0);

                /* Put the PHY in loopback at the chosen speed. */
                tg3_writephy(tp, MII_BMCR, val);
                udelay(40);

                mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                } else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                /* reset to prevent losing 1st rx packet intermittently */
                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                        tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                        udelay(10);
                        tw32_f(MAC_RX_MODE, tp->rx_mode);
                }
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                        /* Link polarity depends on the exact PHY on 5700. */
                        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
                                mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                }
                tw32(MAC_MODE, mac_mode);
        }
        else
                return -EINVAL;

        err = -EIO;

        /* Build a max-size frame: our own MAC as destination, zeroed
         * source/type, then an incrementing byte pattern as payload.
         */
        tx_len = 1514;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + 4);

        for (i = 14; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

        /* Force a coalescing pass so the rx producer index is current
         * before we record the starting value.
         */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
             HOSTCC_MODE_NOW);

        udelay(10);

        rx_start_idx = tp->hw_status->idx[0].rx_producer;

        num_pkts = 0;

        /* Queue the frame and kick the send mailbox. */
        tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

        tp->tx_prod++;
        num_pkts++;

        tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
                     tp->tx_prod);
        tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

        udelay(10);

        /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 25; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       HOSTCC_MODE_NOW);

                udelay(10);

                tx_idx = tp->hw_status->idx[0].tx_consumer;
                rx_idx = tp->hw_status->idx[0].rx_producer;
                if ((tx_idx == tp->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The frame must have been consumed on tx and produced on rx. */
        if (tx_idx != tp->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        /* Validate the receive descriptor: standard ring, no errors
         * (except the benign odd-nibble indication), expected length.
         */
        desc = &tp->rx_rcb[rx_start_idx];
        desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
        opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
        if (opaque_key != RXD_OPAQUE_RING_STD)
                goto out;

        if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
            (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                goto out;

        rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
        if (rx_len != tx_len)
                goto out;

        rx_skb = tp->rx_std_buffers[desc_idx].skb;

        map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
        pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

        /* Compare the payload byte-for-byte with what we transmitted. */
        for (i = 14; i < tx_len; i++) {
                if (*(rx_skb->data + i) != (u8) (i & 0xff))
                        goto out;
        }
        err = 0;

        /* tg3_free_rings will unmap and free the rx_skb */
out:
        return err;
}
10113
10114 #define TG3_MAC_LOOPBACK_FAILED         1
10115 #define TG3_PHY_LOOPBACK_FAILED         2
10116 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
10117                                          TG3_PHY_LOOPBACK_FAILED)
10118
/* ethtool loopback self-test: reset the hardware, run MAC loopback
 * always, then PHY loopback when the driver manages a copper PHY itself
 * (not SerDes and not phylib-managed).
 *
 * On 5784/5761/5785 the CPMU's link-speed / link-aware power-management
 * modes are turned off around the test, protected by the CPMU hardware
 * mutex shared with other agents.
 *
 * Returns 0 when every tested path passed, otherwise a bitmask of
 * TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED (TG3_LOOPBACK_FAILED
 * for early failures such as the device being down).
 */
static int tg3_test_loopback(struct tg3 *tp)
{
        int err = 0;
        u32 cpmuctrl = 0;

        if (!netif_running(tp->dev))
                return TG3_LOOPBACK_FAILED;

        err = tg3_reset_hw(tp, 1);
        if (err)
                return TG3_LOOPBACK_FAILED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                int i;
                u32 status;

                tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

                /* Wait for up to 40 microseconds to acquire lock. */
                for (i = 0; i < 4; i++) {
                        status = tr32(TG3_CPMU_MUTEX_GNT);
                        if (status == CPMU_MUTEX_GNT_DRIVER)
                                break;
                        udelay(10);
                }

                if (status != CPMU_MUTEX_GNT_DRIVER)
                        return TG3_LOOPBACK_FAILED;

                /* Turn off link-based power management. */
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                tw32(TG3_CPMU_CTRL,
                     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
                                  CPMU_CTRL_LINK_AWARE_MODE));
        }

        if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
                err |= TG3_MAC_LOOPBACK_FAILED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                /* Restore CPMU power management before releasing the mutex. */
                tw32(TG3_CPMU_CTRL, cpmuctrl);

                /* Release the mutex */
                tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
        }

        /* Skip PHY loopback for SerDes links and phylib-managed PHYs. */
        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
                if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
                        err |= TG3_PHY_LOOPBACK_FAILED;
        }

        return err;
}
10177
/* ethtool self-test entry point (ethtool -t).
 *
 * Results are reported in data[0..5]: nvram, link, registers, memory,
 * loopback, interrupt; a nonzero slot marks a failure and also sets
 * ETH_TEST_FL_FAILED in etest->flags.  The nvram and link tests always
 * run; the remaining (destructive) tests run only when the caller asked
 * for an offline test, in which case the chip is halted, tested, and
 * then fully re-initialized.  A device sleeping in low-power state is
 * temporarily brought to D0 for the duration of the test.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);

        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D0);

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        if (tg3_test_link(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, err2 = 0, irq_sync = 0;

                /* Quiesce the device before the destructive tests. */
                if (netif_running(dev)) {
                        tg3_phy_stop(tp);
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                /* Halt the chip and its on-board CPUs; hold the NVRAM
                 * lock across the CPU halt so firmware isn't mid-access.
                 */
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }
                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }
                /* data[4] carries the loopback failure bitmask directly. */
                if ((data[4] = tg3_test_loopback(tp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* The interrupt test needs interrupts enabled, so it runs
                 * outside the full lock.
                 */
                tg3_full_unlock(tp);

                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[5] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Reset and, if we were running, bring the chip back up. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                        err2 = tg3_restart_hw(tp, 1);
                        if (!err2)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);

                if (irq_sync && !err2)
                        tg3_phy_start(tp);
        }
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D3hot);

}
10255
10256 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10257 {
10258         struct mii_ioctl_data *data = if_mii(ifr);
10259         struct tg3 *tp = netdev_priv(dev);
10260         int err;
10261
10262         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10263                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10264                         return -EAGAIN;
10265                 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10266         }
10267
10268         switch(cmd) {
10269         case SIOCGMIIPHY:
10270                 data->phy_id = PHY_ADDR;
10271
10272                 /* fallthru */
10273         case SIOCGMIIREG: {
10274                 u32 mii_regval;
10275
10276                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10277                         break;                  /* We have no PHY */
10278
10279                 if (tp->link_config.phy_is_low_power)
10280                         return -EAGAIN;
10281
10282                 spin_lock_bh(&tp->lock);
10283                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10284                 spin_unlock_bh(&tp->lock);
10285
10286                 data->val_out = mii_regval;
10287
10288                 return err;
10289         }
10290
10291         case SIOCSMIIREG:
10292                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10293                         break;                  /* We have no PHY */
10294
10295                 if (!capable(CAP_NET_ADMIN))
10296                         return -EPERM;
10297
10298                 if (tp->link_config.phy_is_low_power)
10299                         return -EAGAIN;
10300
10301                 spin_lock_bh(&tp->lock);
10302                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10303                 spin_unlock_bh(&tp->lock);
10304
10305                 return err;
10306
10307         default:
10308                 /* do nothing */
10309                 break;
10310         }
10311         return -EOPNOTSUPP;
10312 }
10313
10314 #if TG3_VLAN_TAG_USED
/* net_device vlan_rx_register hook: record the new VLAN group pointer
 * and refresh the RX mode register so VLAN tag stripping matches.
 * The datapath is stopped around the update when the device is up.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Quiesce the device before changing tp->vlgrp. */
        if (netif_running(dev))
                tg3_netif_stop(tp);

        tg3_full_lock(tp, 0);

        tp->vlgrp = grp;

        /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
        __tg3_set_rx_mode(dev);

        if (netif_running(dev))
                tg3_netif_start(tp);

        tg3_full_unlock(tp);
}
10334 #endif
10335
10336 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10337 {
10338         struct tg3 *tp = netdev_priv(dev);
10339
10340         memcpy(ec, &tp->coal, sizeof(*ec));
10341         return 0;
10342 }
10343
10344 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10345 {
10346         struct tg3 *tp = netdev_priv(dev);
10347         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10348         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10349
10350         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10351                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10352                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10353                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10354                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10355         }
10356
10357         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10358             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10359             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10360             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10361             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10362             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10363             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10364             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10365             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10366             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10367                 return -EINVAL;
10368
10369         /* No rx interrupts will be generated if both are zero */
10370         if ((ec->rx_coalesce_usecs == 0) &&
10371             (ec->rx_max_coalesced_frames == 0))
10372                 return -EINVAL;
10373
10374         /* No tx interrupts will be generated if both are zero */
10375         if ((ec->tx_coalesce_usecs == 0) &&
10376             (ec->tx_max_coalesced_frames == 0))
10377                 return -EINVAL;
10378
10379         /* Only copy relevant parameters, ignore all others. */
10380         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10381         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10382         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10383         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10384         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10385         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10386         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10387         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10388         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10389
10390         if (netif_running(dev)) {
10391                 tg3_full_lock(tp, 0);
10392                 __tg3_set_coalesce(tp, &tp->coal);
10393                 tg3_full_unlock(tp);
10394         }
10395         return 0;
10396 }
10397
/* ethtool operations table for the tg3 driver.  Each handler is
 * implemented elsewhere in this file; hooks tg3 does not support are
 * simply left unset.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .get_rx_csum            = tg3_get_rx_csum,
        .set_rx_csum            = tg3_set_rx_csum,
        .set_tx_csum            = tg3_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = tg3_set_tso,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .phys_id                = tg3_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};
10430
10431 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10432 {
10433         u32 cursize, val, magic;
10434
10435         tp->nvram_size = EEPROM_CHIP_SIZE;
10436
10437         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10438                 return;
10439
10440         if ((magic != TG3_EEPROM_MAGIC) &&
10441             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10442             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10443                 return;
10444
10445         /*
10446          * Size the chip by reading offsets at increasing powers of two.
10447          * When we encounter our validation signature, we know the addressing
10448          * has wrapped around, and thus have our chip size.
10449          */
10450         cursize = 0x10;
10451
10452         while (cursize < tp->nvram_size) {
10453                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10454                         return;
10455
10456                 if (val == magic)
10457                         break;
10458
10459                 cursize <<= 1;
10460         }
10461
10462         tp->nvram_size = cursize;
10463 }
10464
10465 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10466 {
10467         u32 val;
10468
10469         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10470                 return;
10471
10472         /* Selfboot format */
10473         if (val != TG3_EEPROM_MAGIC) {
10474                 tg3_get_eeprom_size(tp);
10475                 return;
10476         }
10477
10478         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10479                 if (val != 0) {
10480                         tp->nvram_size = (val >> 16) * 1024;
10481                         return;
10482                 }
10483         }
10484         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10485 }
10486
10487 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10488 {
10489         u32 nvcfg1;
10490
10491         nvcfg1 = tr32(NVRAM_CFG1);
10492         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10493                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10494         }
10495         else {
10496                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10497                 tw32(NVRAM_CFG1, nvcfg1);
10498         }
10499
10500         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10501             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10502                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10503                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10504                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10505                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10506                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10507                                 break;
10508                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10509                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10510                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10511                                 break;
10512                         case FLASH_VENDOR_ATMEL_EEPROM:
10513                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10514                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10515                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10516                                 break;
10517                         case FLASH_VENDOR_ST:
10518                                 tp->nvram_jedecnum = JEDEC_ST;
10519                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10520                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10521                                 break;
10522                         case FLASH_VENDOR_SAIFUN:
10523                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10524                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10525                                 break;
10526                         case FLASH_VENDOR_SST_SMALL:
10527                         case FLASH_VENDOR_SST_LARGE:
10528                                 tp->nvram_jedecnum = JEDEC_SST;
10529                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10530                                 break;
10531                 }
10532         }
10533         else {
10534                 tp->nvram_jedecnum = JEDEC_ATMEL;
10535                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10536                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10537         }
10538 }
10539
10540 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10541 {
10542         u32 nvcfg1;
10543
10544         nvcfg1 = tr32(NVRAM_CFG1);
10545
10546         /* NVRAM protection for TPM */
10547         if (nvcfg1 & (1 << 27))
10548                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10549
10550         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10551                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10552                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10553                         tp->nvram_jedecnum = JEDEC_ATMEL;
10554                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10555                         break;
10556                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10557                         tp->nvram_jedecnum = JEDEC_ATMEL;
10558                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10559                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10560                         break;
10561                 case FLASH_5752VENDOR_ST_M45PE10:
10562                 case FLASH_5752VENDOR_ST_M45PE20:
10563                 case FLASH_5752VENDOR_ST_M45PE40:
10564                         tp->nvram_jedecnum = JEDEC_ST;
10565                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10566                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10567                         break;
10568         }
10569
10570         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10571                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10572                         case FLASH_5752PAGE_SIZE_256:
10573                                 tp->nvram_pagesize = 256;
10574                                 break;
10575                         case FLASH_5752PAGE_SIZE_512:
10576                                 tp->nvram_pagesize = 512;
10577                                 break;
10578                         case FLASH_5752PAGE_SIZE_1K:
10579                                 tp->nvram_pagesize = 1024;
10580                                 break;
10581                         case FLASH_5752PAGE_SIZE_2K:
10582                                 tp->nvram_pagesize = 2048;
10583                                 break;
10584                         case FLASH_5752PAGE_SIZE_4K:
10585                                 tp->nvram_pagesize = 4096;
10586                                 break;
10587                         case FLASH_5752PAGE_SIZE_264:
10588                                 tp->nvram_pagesize = 264;
10589                                 break;
10590                 }
10591         }
10592         else {
10593                 /* For eeprom, set pagesize to maximum eeprom size */
10594                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10595
10596                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10597                 tw32(NVRAM_CFG1, nvcfg1);
10598         }
10599 }
10600
10601 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10602 {
10603         u32 nvcfg1, protect = 0;
10604
10605         nvcfg1 = tr32(NVRAM_CFG1);
10606
10607         /* NVRAM protection for TPM */
10608         if (nvcfg1 & (1 << 27)) {
10609                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10610                 protect = 1;
10611         }
10612
10613         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10614         switch (nvcfg1) {
10615                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10616                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10617                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10618                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10619                         tp->nvram_jedecnum = JEDEC_ATMEL;
10620                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10621                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10622                         tp->nvram_pagesize = 264;
10623                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10624                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10625                                 tp->nvram_size = (protect ? 0x3e200 :
10626                                                   TG3_NVRAM_SIZE_512KB);
10627                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10628                                 tp->nvram_size = (protect ? 0x1f200 :
10629                                                   TG3_NVRAM_SIZE_256KB);
10630                         else
10631                                 tp->nvram_size = (protect ? 0x1f200 :
10632                                                   TG3_NVRAM_SIZE_128KB);
10633                         break;
10634                 case FLASH_5752VENDOR_ST_M45PE10:
10635                 case FLASH_5752VENDOR_ST_M45PE20:
10636                 case FLASH_5752VENDOR_ST_M45PE40:
10637                         tp->nvram_jedecnum = JEDEC_ST;
10638                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10639                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10640                         tp->nvram_pagesize = 256;
10641                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10642                                 tp->nvram_size = (protect ?
10643                                                   TG3_NVRAM_SIZE_64KB :
10644                                                   TG3_NVRAM_SIZE_128KB);
10645                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10646                                 tp->nvram_size = (protect ?
10647                                                   TG3_NVRAM_SIZE_64KB :
10648                                                   TG3_NVRAM_SIZE_256KB);
10649                         else
10650                                 tp->nvram_size = (protect ?
10651                                                   TG3_NVRAM_SIZE_128KB :
10652                                                   TG3_NVRAM_SIZE_512KB);
10653                         break;
10654         }
10655 }
10656
10657 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10658 {
10659         u32 nvcfg1;
10660
10661         nvcfg1 = tr32(NVRAM_CFG1);
10662
10663         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10664                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10665                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10666                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10667                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10668                         tp->nvram_jedecnum = JEDEC_ATMEL;
10669                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10670                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10671
10672                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10673                         tw32(NVRAM_CFG1, nvcfg1);
10674                         break;
10675                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10676                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10677                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10678                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10679                         tp->nvram_jedecnum = JEDEC_ATMEL;
10680                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10681                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10682                         tp->nvram_pagesize = 264;
10683                         break;
10684                 case FLASH_5752VENDOR_ST_M45PE10:
10685                 case FLASH_5752VENDOR_ST_M45PE20:
10686                 case FLASH_5752VENDOR_ST_M45PE40:
10687                         tp->nvram_jedecnum = JEDEC_ST;
10688                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10689                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10690                         tp->nvram_pagesize = 256;
10691                         break;
10692         }
10693 }
10694
10695 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10696 {
10697         u32 nvcfg1, protect = 0;
10698
10699         nvcfg1 = tr32(NVRAM_CFG1);
10700
10701         /* NVRAM protection for TPM */
10702         if (nvcfg1 & (1 << 27)) {
10703                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10704                 protect = 1;
10705         }
10706
10707         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10708         switch (nvcfg1) {
10709                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10710                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10711                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10712                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10713                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10714                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10715                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10716                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10717                         tp->nvram_jedecnum = JEDEC_ATMEL;
10718                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10719                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10720                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10721                         tp->nvram_pagesize = 256;
10722                         break;
10723                 case FLASH_5761VENDOR_ST_A_M45PE20:
10724                 case FLASH_5761VENDOR_ST_A_M45PE40:
10725                 case FLASH_5761VENDOR_ST_A_M45PE80:
10726                 case FLASH_5761VENDOR_ST_A_M45PE16:
10727                 case FLASH_5761VENDOR_ST_M_M45PE20:
10728                 case FLASH_5761VENDOR_ST_M_M45PE40:
10729                 case FLASH_5761VENDOR_ST_M_M45PE80:
10730                 case FLASH_5761VENDOR_ST_M_M45PE16:
10731                         tp->nvram_jedecnum = JEDEC_ST;
10732                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10733                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10734                         tp->nvram_pagesize = 256;
10735                         break;
10736         }
10737
10738         if (protect) {
10739                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10740         } else {
10741                 switch (nvcfg1) {
10742                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10743                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10744                         case FLASH_5761VENDOR_ST_A_M45PE16:
10745                         case FLASH_5761VENDOR_ST_M_M45PE16:
10746                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10747                                 break;
10748                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10749                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10750                         case FLASH_5761VENDOR_ST_A_M45PE80:
10751                         case FLASH_5761VENDOR_ST_M_M45PE80:
10752                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10753                                 break;
10754                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10755                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10756                         case FLASH_5761VENDOR_ST_A_M45PE40:
10757                         case FLASH_5761VENDOR_ST_M_M45PE40:
10758                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10759                                 break;
10760                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10761                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10762                         case FLASH_5761VENDOR_ST_A_M45PE20:
10763                         case FLASH_5761VENDOR_ST_M_M45PE20:
10764                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10765                                 break;
10766                 }
10767         }
10768 }
10769
10770 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10771 {
10772         tp->nvram_jedecnum = JEDEC_ATMEL;
10773         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10774         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10775 }
10776
10777 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10778 static void __devinit tg3_nvram_init(struct tg3 *tp)
10779 {
10780         tw32_f(GRC_EEPROM_ADDR,
10781              (EEPROM_ADDR_FSM_RESET |
10782               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10783                EEPROM_ADDR_CLKPERD_SHIFT)));
10784
10785         msleep(1);
10786
10787         /* Enable seeprom accesses. */
10788         tw32_f(GRC_LOCAL_CTRL,
10789              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10790         udelay(100);
10791
10792         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10793             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10794                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10795
10796                 if (tg3_nvram_lock(tp)) {
10797                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10798                                "tg3_nvram_init failed.\n", tp->dev->name);
10799                         return;
10800                 }
10801                 tg3_enable_nvram_access(tp);
10802
10803                 tp->nvram_size = 0;
10804
10805                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10806                         tg3_get_5752_nvram_info(tp);
10807                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10808                         tg3_get_5755_nvram_info(tp);
10809                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10810                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10811                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10812                         tg3_get_5787_nvram_info(tp);
10813                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10814                         tg3_get_5761_nvram_info(tp);
10815                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10816                         tg3_get_5906_nvram_info(tp);
10817                 else
10818                         tg3_get_nvram_info(tp);
10819
10820                 if (tp->nvram_size == 0)
10821                         tg3_get_nvram_size(tp);
10822
10823                 tg3_disable_nvram_access(tp);
10824                 tg3_nvram_unlock(tp);
10825
10826         } else {
10827                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10828
10829                 tg3_get_eeprom_size(tp);
10830         }
10831 }
10832
10833 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10834                                         u32 offset, u32 *val)
10835 {
10836         u32 tmp;
10837         int i;
10838
10839         if (offset > EEPROM_ADDR_ADDR_MASK ||
10840             (offset % 4) != 0)
10841                 return -EINVAL;
10842
10843         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10844                                         EEPROM_ADDR_DEVID_MASK |
10845                                         EEPROM_ADDR_READ);
10846         tw32(GRC_EEPROM_ADDR,
10847              tmp |
10848              (0 << EEPROM_ADDR_DEVID_SHIFT) |
10849              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10850               EEPROM_ADDR_ADDR_MASK) |
10851              EEPROM_ADDR_READ | EEPROM_ADDR_START);
10852
10853         for (i = 0; i < 1000; i++) {
10854                 tmp = tr32(GRC_EEPROM_ADDR);
10855
10856                 if (tmp & EEPROM_ADDR_COMPLETE)
10857                         break;
10858                 msleep(1);
10859         }
10860         if (!(tmp & EEPROM_ADDR_COMPLETE))
10861                 return -EBUSY;
10862
10863         *val = tr32(GRC_EEPROM_DATA);
10864         return 0;
10865 }
10866
10867 #define NVRAM_CMD_TIMEOUT 10000
10868
10869 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10870 {
10871         int i;
10872
10873         tw32(NVRAM_CMD, nvram_cmd);
10874         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10875                 udelay(10);
10876                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10877                         udelay(10);
10878                         break;
10879                 }
10880         }
10881         if (i == NVRAM_CMD_TIMEOUT) {
10882                 return -EBUSY;
10883         }
10884         return 0;
10885 }
10886
10887 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10888 {
10889         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10890             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10891             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10892            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10893             (tp->nvram_jedecnum == JEDEC_ATMEL))
10894
10895                 addr = ((addr / tp->nvram_pagesize) <<
10896                         ATMEL_AT45DB0X1B_PAGE_POS) +
10897                        (addr % tp->nvram_pagesize);
10898
10899         return addr;
10900 }
10901
10902 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10903 {
10904         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10905             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10906             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10907            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10908             (tp->nvram_jedecnum == JEDEC_ATMEL))
10909
10910                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10911                         tp->nvram_pagesize) +
10912                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10913
10914         return addr;
10915 }
10916
/* Read one 32-bit word from NVRAM at logical byte offset @offset.
 *
 * When the device has no NVRAM interface, falls back to the EEPROM
 * state machine.  Otherwise: translate the offset to the device's
 * physical addressing, take the hardware NVRAM lock, enable access,
 * issue a single-word read, and store the (byte-swapped) result in
 * *@val.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        /* Buffered Atmel parts need page:offset address translation. */
        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        /* Data register contents are byte-swapped relative to callers. */
        if (ret == 0)
                *val = swab32(tr32(NVRAM_RDDATA));

        /* Always undo the access enable and lock, even on failure. */
        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
10948
10949 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10950 {
10951         u32 v;
10952         int res = tg3_nvram_read(tp, offset, &v);
10953         if (!res)
10954                 *val = cpu_to_le32(v);
10955         return res;
10956 }
10957
10958 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10959 {
10960         int err;
10961         u32 tmp;
10962
10963         err = tg3_nvram_read(tp, offset, &tmp);
10964         *val = swab32(tmp);
10965         return err;
10966 }
10967
/* Write @len bytes from @buf to EEPROM starting at byte @offset, one
 * 32-bit word per GRC_EEPROM_ADDR/GRC_EEPROM_DATA transaction.  The
 * caller guarantees @offset and @len are dword aligned.
 * Returns 0 on success, or -EBUSY if a word write fails to signal
 * completion within the polling window (~1000 x 1 ms).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__le32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/* Present the word to the controller in CPU byte order. */
		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

		/* Write COMPLETE back first — presumably write-one-to-clear,
		 * acking any stale completion status.  TODO confirm against
		 * the register spec.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Program the target address (device id 0) and kick off a
		 * write cycle.
		 */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for the completion flag. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
11010
/* offset and length are dword aligned */
/* Write @len bytes from @buf to unbuffered flash at byte @offset by
 * read-modify-writing whole flash pages: each affected page is read
 * into a scratch buffer, patched, erased, and reprogrammed word by
 * word.  Always finishes by issuing a write-disable command.
 * Returns 0 on success, -ENOMEM if the scratch page cannot be
 * allocated, or the first error from the read/exec helpers.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the flash page containing @offset. */
		phy_addr = offset & ~pagemask;

		/* Read the whole page into the scratch buffer. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		/* Patch the caller's data into the page image.  NOTE(review):
		 * @buf is not advanced across iterations; for multi-page
		 * writes every page appears to be patched from the start of
		 * @buf — confirm intended usage (callers may only ever write
		 * within one page).
		 */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		/* Advance to the next page boundary. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the patched page back, one word at a time,
		 * marking the first and last words of the burst.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always re-disable writes, even on the error paths above. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
11107
/* offset and length are dword aligned */
/* Write @len bytes from @buf via the NVRAM command interface, one
 * 32-bit word per command, for buffered flash (and EEPROM-style)
 * parts.  Returns 0 on success or the first error from
 * tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST marks the start of the transfer and the start of
		 * each page; LAST marks the end of a page and the end of
		 * the whole transfer.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST-brand parts on ASICs other than those listed need an
		 * explicit write-enable command before each FIRST word.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
11162
11163 /* offset and length are dword aligned */
11164 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11165 {
11166         int ret;
11167
11168         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11169                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11170                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11171                 udelay(40);
11172         }
11173
11174         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11175                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11176         }
11177         else {
11178                 u32 grc_mode;
11179
11180                 ret = tg3_nvram_lock(tp);
11181                 if (ret)
11182                         return ret;
11183
11184                 tg3_enable_nvram_access(tp);
11185                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11186                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11187                         tw32(NVRAM_WRITE1, 0x406);
11188
11189                 grc_mode = tr32(GRC_MODE);
11190                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11191
11192                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11193                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11194
11195                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11196                                 buf);
11197                 }
11198                 else {
11199                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11200                                 buf);
11201                 }
11202
11203                 grc_mode = tr32(GRC_MODE);
11204                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11205
11206                 tg3_disable_nvram_access(tp);
11207                 tg3_nvram_unlock(tp);
11208         }
11209
11210         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11211                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11212                 udelay(40);
11213         }
11214
11215         return ret;
11216 }
11217
/* Maps a board's PCI subsystem (vendor, device) ID pair to the PHY
 * expected on that board.  Used by tg3_phy_probe() as a last-resort
 * fallback when the PHY ID can be read neither from the PHY itself nor
 * from the EEPROM.  An entry with phy_id == 0 makes tg3_phy_probe()
 * mark the device as a serdes (TG3_FLG2_PHY_SERDES).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
11260
11261 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11262 {
11263         int i;
11264
11265         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11266                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11267                      tp->pdev->subsystem_vendor) &&
11268                     (subsys_id_to_phy_id[i].subsys_devid ==
11269                      tp->pdev->subsystem_device))
11270                         return &subsys_id_to_phy_id[i];
11271         }
11272         return NULL;
11273 }
11274
/* Pull the hardware configuration out of the chip's NVRAM/SRAM shadow
 * and translate it into tp->phy_id, tp->led_ctrl, and the various
 * tg3_flags* bits (write protect, WOL, ASF/APE, serdes, RGMII, ASPM).
 * Runs at probe time; leaves the device in D0 and the memory arbiter
 * enabled as side effects required for SRAM access.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906 exposes its config through the VCPU shadow register
	 * instead of the NIC SRAM area below.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		return;
	}

	/* Only trust the SRAM config area when the signature matches;
	 * otherwise leave the defaults set above in place.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer chips with a sane bootcode
		 * version (0 < ver < 0x100).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from its packed SRAM encoding. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED overrides. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		/* Write-protect bit from NVRAM, with a carve-out for two
		 * Arima boards that set it incorrectly.
		 */
		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}

		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}
}
11477
11478 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11479 {
11480         int i;
11481         u32 val;
11482
11483         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11484         tw32(OTP_CTRL, cmd);
11485
11486         /* Wait for up to 1 ms for command to execute. */
11487         for (i = 0; i < 100; i++) {
11488                 val = tr32(OTP_STATUS);
11489                 if (val & OTP_STATUS_CMD_DONE)
11490                         break;
11491                 udelay(10);
11492         }
11493
11494         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11495 }
11496
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 on any OTP command failure (callers treat 0 as "no config").
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register window. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Low 16 bits of the first read become the upper half of the
	 * result; high 16 bits of the second read become the lower half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
11526
/* Probe and identify the PHY attached to this device, set tp->phy_id
 * and the serdes flags, and (for copper PHYs without ASF/APE firmware)
 * reset the PHY and program default autonegotiation advertisement.
 * Falls back to the NVRAM-provided ID and then to the hardcoded
 * subsystem-ID table when the PHY registers cannot be trusted or read.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY, no management firmware: reset the PHY and set up
	 * default advertisement unless a link is already established.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice; link status is latched-low, so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this second DSP init looks redundant — it can
	 * only run when the block above already succeeded (err == 0) for
	 * the same PHY ID.  Possibly a deliberate double-write workaround
	 * for the 5401; confirm before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11657
11658 static void __devinit tg3_read_partno(struct tg3 *tp)
11659 {
11660         unsigned char vpd_data[256];
11661         unsigned int i;
11662         u32 magic;
11663
11664         if (tg3_nvram_read_swab(tp, 0x0, &magic))
11665                 goto out_not_found;
11666
11667         if (magic == TG3_EEPROM_MAGIC) {
11668                 for (i = 0; i < 256; i += 4) {
11669                         u32 tmp;
11670
11671                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11672                                 goto out_not_found;
11673
11674                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11675                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11676                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11677                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11678                 }
11679         } else {
11680                 int vpd_cap;
11681
11682                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11683                 for (i = 0; i < 256; i += 4) {
11684                         u32 tmp, j = 0;
11685                         __le32 v;
11686                         u16 tmp16;
11687
11688                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11689                                               i);
11690                         while (j++ < 100) {
11691                                 pci_read_config_word(tp->pdev, vpd_cap +
11692                                                      PCI_VPD_ADDR, &tmp16);
11693                                 if (tmp16 & 0x8000)
11694                                         break;
11695                                 msleep(1);
11696                         }
11697                         if (!(tmp16 & 0x8000))
11698                                 goto out_not_found;
11699
11700                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11701                                               &tmp);
11702                         v = cpu_to_le32(tmp);
11703                         memcpy(&vpd_data[i], &v, 4);
11704                 }
11705         }
11706
11707         /* Now parse and find the part number. */
11708         for (i = 0; i < 254; ) {
11709                 unsigned char val = vpd_data[i];
11710                 unsigned int block_end;
11711
11712                 if (val == 0x82 || val == 0x91) {
11713                         i = (i + 3 +
11714                              (vpd_data[i + 1] +
11715                               (vpd_data[i + 2] << 8)));
11716                         continue;
11717                 }
11718
11719                 if (val != 0x90)
11720                         goto out_not_found;
11721
11722                 block_end = (i + 3 +
11723                              (vpd_data[i + 1] +
11724                               (vpd_data[i + 2] << 8)));
11725                 i += 3;
11726
11727                 if (block_end > 256)
11728                         goto out_not_found;
11729
11730                 while (i < (block_end - 2)) {
11731                         if (vpd_data[i + 0] == 'P' &&
11732                             vpd_data[i + 1] == 'N') {
11733                                 int partno_len = vpd_data[i + 2];
11734
11735                                 i += 3;
11736                                 if (partno_len > 24 || (partno_len + i) > 256)
11737                                         goto out_not_found;
11738
11739                                 memcpy(tp->board_part_number,
11740                                        &vpd_data[i], partno_len);
11741
11742                                 /* Success. */
11743                                 return;
11744                         }
11745                         i += 3 + vpd_data[i + 2];
11746                 }
11747
11748                 /* Part number not found. */
11749                 goto out_not_found;
11750         }
11751
11752 out_not_found:
11753         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11754                 strcpy(tp->board_part_number, "BCM95906");
11755         else
11756                 strcpy(tp->board_part_number, "none");
11757 }
11758
11759 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11760 {
11761         u32 val;
11762
11763         if (tg3_nvram_read_swab(tp, offset, &val) ||
11764             (val & 0xfc000000) != 0x0c000000 ||
11765             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11766             val != 0)
11767                 return 0;
11768
11769         return 1;
11770 }
11771
/* Extract the bootcode firmware version string from NVRAM into
 * tp->fw_ver, and — when ASF is enabled without APE — append the ASF
 * initialization firmware version as well.
 *
 * The routine bails out silently on any NVRAM read failure or format
 * mismatch, leaving tp->fw_ver with whatever was filled in so far.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Word 0 must contain the EEPROM self-test magic, otherwise the
	 * NVRAM layout is not one we know how to parse.
	 */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc: physical offset of the bootcode image.
	 * Word 0x4: load (start) address the image is linked against.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	/* Image header word 2 (offset + 8) holds the address of the
	 * version string within the loaded image.
	 */
	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* Translate the in-image address back to an NVRAM offset and
	 * copy up to 16 bytes of version text, one dword at a time.
	 */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* The ASF firmware version is only meaningful when ASF is
	 * enabled and management is not handled by the APE instead.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Walk the NVRAM directory looking for the ASF-init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed load address; newer parts record
	 * it in the dword preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	/* Directory entry word 1 (offset + 4) points at the ASF image;
	 * its header word 2 (offset + 8) gives the version address.
	 */
	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	/* Append ", <asf-version>" after the bootcode version. */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Near the end of the buffer, copy only what fits and
		 * stop; the final NUL write below caps the string.
		 */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination regardless of how much was copied. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11855
11856 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11857
11858 static int __devinit tg3_get_invariants(struct tg3 *tp)
11859 {
11860         static struct pci_device_id write_reorder_chipsets[] = {
11861                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11862                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11863                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11864                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11865                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11866                              PCI_DEVICE_ID_VIA_8385_0) },
11867                 { },
11868         };
11869         u32 misc_ctrl_reg;
11870         u32 cacheline_sz_reg;
11871         u32 pci_state_reg, grc_misc_cfg;
11872         u32 val;
11873         u16 pci_cmd;
11874         int err, pcie_cap;
11875
11876         /* Force memory write invalidate off.  If we leave it on,
11877          * then on 5700_BX chips we have to enable a workaround.
11878          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11879          * to match the cacheline size.  The Broadcom driver have this
11880          * workaround but turns MWI off all the times so never uses
11881          * it.  This seems to suggest that the workaround is insufficient.
11882          */
11883         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11884         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11885         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11886
11887         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11888          * has the register indirect write enable bit set before
11889          * we try to access any of the MMIO registers.  It is also
11890          * critical that the PCI-X hw workaround situation is decided
11891          * before that as well.
11892          */
11893         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11894                               &misc_ctrl_reg);
11895
11896         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11897                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11898         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11899                 u32 prod_id_asic_rev;
11900
11901                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11902                                       &prod_id_asic_rev);
11903                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11904         }
11905
11906         /* Wrong chip ID in 5752 A0. This code can be removed later
11907          * as A0 is not in production.
11908          */
11909         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11910                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11911
11912         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11913          * we need to disable memory and use config. cycles
11914          * only to access all registers. The 5702/03 chips
11915          * can mistakenly decode the special cycles from the
11916          * ICH chipsets as memory write cycles, causing corruption
11917          * of register and memory space. Only certain ICH bridges
11918          * will drive special cycles with non-zero data during the
11919          * address phase which can fall within the 5703's address
11920          * range. This is not an ICH bug as the PCI spec allows
11921          * non-zero address during special cycles. However, only
11922          * these ICH bridges are known to drive non-zero addresses
11923          * during special cycles.
11924          *
11925          * Since special cycles do not cross PCI bridges, we only
11926          * enable this workaround if the 5703 is on the secondary
11927          * bus of these ICH bridges.
11928          */
11929         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11930             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11931                 static struct tg3_dev_id {
11932                         u32     vendor;
11933                         u32     device;
11934                         u32     rev;
11935                 } ich_chipsets[] = {
11936                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11937                           PCI_ANY_ID },
11938                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11939                           PCI_ANY_ID },
11940                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11941                           0xa },
11942                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11943                           PCI_ANY_ID },
11944                         { },
11945                 };
11946                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11947                 struct pci_dev *bridge = NULL;
11948
11949                 while (pci_id->vendor != 0) {
11950                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11951                                                 bridge);
11952                         if (!bridge) {
11953                                 pci_id++;
11954                                 continue;
11955                         }
11956                         if (pci_id->rev != PCI_ANY_ID) {
11957                                 if (bridge->revision > pci_id->rev)
11958                                         continue;
11959                         }
11960                         if (bridge->subordinate &&
11961                             (bridge->subordinate->number ==
11962                              tp->pdev->bus->number)) {
11963
11964                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11965                                 pci_dev_put(bridge);
11966                                 break;
11967                         }
11968                 }
11969         }
11970
11971         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11972                 static struct tg3_dev_id {
11973                         u32     vendor;
11974                         u32     device;
11975                 } bridge_chipsets[] = {
11976                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11977                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11978                         { },
11979                 };
11980                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11981                 struct pci_dev *bridge = NULL;
11982
11983                 while (pci_id->vendor != 0) {
11984                         bridge = pci_get_device(pci_id->vendor,
11985                                                 pci_id->device,
11986                                                 bridge);
11987                         if (!bridge) {
11988                                 pci_id++;
11989                                 continue;
11990                         }
11991                         if (bridge->subordinate &&
11992                             (bridge->subordinate->number <=
11993                              tp->pdev->bus->number) &&
11994                             (bridge->subordinate->subordinate >=
11995                              tp->pdev->bus->number)) {
11996                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11997                                 pci_dev_put(bridge);
11998                                 break;
11999                         }
12000                 }
12001         }
12002
12003         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12004          * DMA addresses > 40-bit. This bridge may have other additional
12005          * 57xx devices behind it in some 4-port NIC designs for example.
12006          * Any tg3 device found behind the bridge will also need the 40-bit
12007          * DMA workaround.
12008          */
12009         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12010             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12011                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12012                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12013                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12014         }
12015         else {
12016                 struct pci_dev *bridge = NULL;
12017
12018                 do {
12019                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12020                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12021                                                 bridge);
12022                         if (bridge && bridge->subordinate &&
12023                             (bridge->subordinate->number <=
12024                              tp->pdev->bus->number) &&
12025                             (bridge->subordinate->subordinate >=
12026                              tp->pdev->bus->number)) {
12027                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12028                                 pci_dev_put(bridge);
12029                                 break;
12030                         }
12031                 } while (bridge);
12032         }
12033
12034         /* Initialize misc host control in PCI block. */
12035         tp->misc_host_ctrl |= (misc_ctrl_reg &
12036                                MISC_HOST_CTRL_CHIPREV);
12037         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12038                                tp->misc_host_ctrl);
12039
12040         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12041                               &cacheline_sz_reg);
12042
12043         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12044         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12045         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12046         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12047
12048         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12049             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12050                 tp->pdev_peer = tg3_find_peer(tp);
12051
12052         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12053             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12054             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12055             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12056             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12057             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12058             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12059             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12060             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12061                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12062
12063         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12064             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12065                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12066
12067         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12068                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12069                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12070                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12071                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12072                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12073                      tp->pdev_peer == tp->pdev))
12074                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12075
12076                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12077                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12078                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12079                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12080                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12081                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12082                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12083                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12084                 } else {
12085                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12086                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12087                                 ASIC_REV_5750 &&
12088                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12089                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12090                 }
12091         }
12092
12093         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12094              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12095                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12096
12097         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12098         if (pcie_cap != 0) {
12099                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12100
12101                 pcie_set_readrq(tp->pdev, 4096);
12102
12103                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12104                         u16 lnkctl;
12105
12106                         pci_read_config_word(tp->pdev,
12107                                              pcie_cap + PCI_EXP_LNKCTL,
12108                                              &lnkctl);
12109                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12110                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12111                 }
12112         }
12113
12114         /* If we have an AMD 762 or VIA K8T800 chipset, write
12115          * reordering to the mailbox registers done by the host
12116          * controller can cause major troubles.  We read back from
12117          * every mailbox register write to force the writes to be
12118          * posted to the chip in order.
12119          */
12120         if (pci_dev_present(write_reorder_chipsets) &&
12121             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12122                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12123
12124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12125             tp->pci_lat_timer < 64) {
12126                 tp->pci_lat_timer = 64;
12127
12128                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12129                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12130                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12131                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12132
12133                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12134                                        cacheline_sz_reg);
12135         }
12136
12137         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12138             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12139                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12140                 if (!tp->pcix_cap) {
12141                         printk(KERN_ERR PFX "Cannot find PCI-X "
12142                                             "capability, aborting.\n");
12143                         return -EIO;
12144                 }
12145         }
12146
12147         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12148                               &pci_state_reg);
12149
12150         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12151                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12152
12153                 /* If this is a 5700 BX chipset, and we are in PCI-X
12154                  * mode, enable register write workaround.
12155                  *
12156                  * The workaround is to use indirect register accesses
12157                  * for all chip writes not to mailbox registers.
12158                  */
12159                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12160                         u32 pm_reg;
12161
12162                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12163
12164                         /* The chip can have it's power management PCI config
12165                          * space registers clobbered due to this bug.
12166                          * So explicitly force the chip into D0 here.
12167                          */
12168                         pci_read_config_dword(tp->pdev,
12169                                               tp->pm_cap + PCI_PM_CTRL,
12170                                               &pm_reg);
12171                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12172                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12173                         pci_write_config_dword(tp->pdev,
12174                                                tp->pm_cap + PCI_PM_CTRL,
12175                                                pm_reg);
12176
12177                         /* Also, force SERR#/PERR# in PCI command. */
12178                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12179                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12180                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12181                 }
12182         }
12183
12184         /* 5700 BX chips need to have their TX producer index mailboxes
12185          * written twice to workaround a bug.
12186          */
12187         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12188                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12189
12190         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12191                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12192         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12193                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12194
12195         /* Chip-specific fixup from Broadcom driver */
12196         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12197             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12198                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12199                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12200         }
12201
12202         /* Default fast path register access methods */
12203         tp->read32 = tg3_read32;
12204         tp->write32 = tg3_write32;
12205         tp->read32_mbox = tg3_read32;
12206         tp->write32_mbox = tg3_write32;
12207         tp->write32_tx_mbox = tg3_write32;
12208         tp->write32_rx_mbox = tg3_write32;
12209
12210         /* Various workaround register access methods */
12211         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12212                 tp->write32 = tg3_write_indirect_reg32;
12213         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12214                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12215                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12216                 /*
12217                  * Back to back register writes can cause problems on these
12218                  * chips, the workaround is to read back all reg writes
12219                  * except those to mailbox regs.
12220                  *
12221                  * See tg3_write_indirect_reg32().
12222                  */
12223                 tp->write32 = tg3_write_flush_reg32;
12224         }
12225
12226
12227         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12228             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12229                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12230                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12231                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12232         }
12233
12234         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12235                 tp->read32 = tg3_read_indirect_reg32;
12236                 tp->write32 = tg3_write_indirect_reg32;
12237                 tp->read32_mbox = tg3_read_indirect_mbox;
12238                 tp->write32_mbox = tg3_write_indirect_mbox;
12239                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12240                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12241
12242                 iounmap(tp->regs);
12243                 tp->regs = NULL;
12244
12245                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12246                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12247                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12248         }
12249         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12250                 tp->read32_mbox = tg3_read32_mbox_5906;
12251                 tp->write32_mbox = tg3_write32_mbox_5906;
12252                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12253                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12254         }
12255
12256         if (tp->write32 == tg3_write_indirect_reg32 ||
12257             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12258              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12259               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12260                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12261
12262         /* Get eeprom hw config before calling tg3_set_power_state().
12263          * In particular, the TG3_FLG2_IS_NIC flag must be
12264          * determined before calling tg3_set_power_state() so that
12265          * we know whether or not to switch out of Vaux power.
12266          * When the flag is set, it means that GPIO1 is used for eeprom
12267          * write protect and also implies that it is a LOM where GPIOs
12268          * are not used to switch power.
12269          */
12270         tg3_get_eeprom_hw_cfg(tp);
12271
12272         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12273                 /* Allow reads and writes to the
12274                  * APE register and memory space.
12275                  */
12276                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12277                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12278                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12279                                        pci_state_reg);
12280         }
12281
12282         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12283             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12284             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12285                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12286
12287                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12288                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12289                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12290                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12291                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12292         }
12293
12294         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12295          * GPIO1 driven high will bring 5700's external PHY out of reset.
12296          * It is also used as eeprom write protect on LOMs.
12297          */
12298         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12299         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12300             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12301                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12302                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12303         /* Unused GPIO3 must be driven as output on 5752 because there
12304          * are no pull-up resistors on unused GPIO pins.
12305          */
12306         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12307                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12308
12309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12310                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12311
12312         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12313                 /* Turn off the debug UART. */
12314                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12315                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12316                         /* Keep VMain power. */
12317                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12318                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12319         }
12320
12321         /* Force the chip into D0. */
12322         err = tg3_set_power_state(tp, PCI_D0);
12323         if (err) {
12324                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12325                        pci_name(tp->pdev));
12326                 return err;
12327         }
12328
12329         /* 5700 B0 chips do not support checksumming correctly due
12330          * to hardware bugs.
12331          */
12332         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12333                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12334
12335         /* Derive initial jumbo mode from MTU assigned in
12336          * ether_setup() via the alloc_etherdev() call
12337          */
12338         if (tp->dev->mtu > ETH_DATA_LEN &&
12339             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12340                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12341
12342         /* Determine WakeOnLan speed to use. */
12343         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12344             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12345             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12346             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12347                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12348         } else {
12349                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12350         }
12351
12352         /* A few boards don't want Ethernet@WireSpeed phy feature */
12353         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12354             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12355              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12356              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12357             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12358             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12359                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12360
12361         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12362             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12363                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12364         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12365                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12366
12367         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12368                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12369                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12370                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12371                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12372                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12373                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12374                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12375                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12376                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12377                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12378                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12379                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12380         }
12381
12382         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12383             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12384                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12385                 if (tp->phy_otp == 0)
12386                         tp->phy_otp = TG3_OTP_DEFAULT;
12387         }
12388
12389         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12390                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12391         else
12392                 tp->mi_mode = MAC_MI_MODE_BASE;
12393
12394         tp->coalesce_mode = 0;
12395         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12396             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12397                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12398
12399         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12400                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12401
12402         err = tg3_mdio_init(tp);
12403         if (err)
12404                 return err;
12405
12406         /* Initialize data/descriptor byte/word swapping. */
12407         val = tr32(GRC_MODE);
12408         val &= GRC_MODE_HOST_STACKUP;
12409         tw32(GRC_MODE, val | tp->grc_mode);
12410
12411         tg3_switch_clocks(tp);
12412
12413         /* Clear this out for sanity. */
12414         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12415
12416         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12417                               &pci_state_reg);
12418         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12419             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12420                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12421
12422                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12423                     chiprevid == CHIPREV_ID_5701_B0 ||
12424                     chiprevid == CHIPREV_ID_5701_B2 ||
12425                     chiprevid == CHIPREV_ID_5701_B5) {
12426                         void __iomem *sram_base;
12427
12428                         /* Write some dummy words into the SRAM status block
12429                          * area, see if it reads back correctly.  If the return
12430                          * value is bad, force enable the PCIX workaround.
12431                          */
12432                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12433
12434                         writel(0x00000000, sram_base);
12435                         writel(0x00000000, sram_base + 4);
12436                         writel(0xffffffff, sram_base + 4);
12437                         if (readl(sram_base) != 0x00000000)
12438                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12439                 }
12440         }
12441
12442         udelay(50);
12443         tg3_nvram_init(tp);
12444
12445         grc_misc_cfg = tr32(GRC_MISC_CFG);
12446         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12447
12448         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12449             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12450              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12451                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12452
12453         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12454             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12455                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12456         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12457                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12458                                       HOSTCC_MODE_CLRTICK_TXBD);
12459
12460                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12461                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12462                                        tp->misc_host_ctrl);
12463         }
12464
12465         /* Preserve the APE MAC_MODE bits */
12466         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12467                 tp->mac_mode = tr32(MAC_MODE) |
12468                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12469         else
12470                 tp->mac_mode = TG3_DEF_MAC_MODE;
12471
12472         /* these are limited to 10/100 only */
12473         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12474              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12475             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12476              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12477              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12478               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12479               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12480             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12481              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12482               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12483               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12484             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12485                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12486
12487         err = tg3_phy_probe(tp);
12488         if (err) {
12489                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12490                        pci_name(tp->pdev), err);
12491                 /* ... but do not return immediately ... */
12492                 tg3_mdio_fini(tp);
12493         }
12494
12495         tg3_read_partno(tp);
12496         tg3_read_fw_ver(tp);
12497
12498         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12499                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12500         } else {
12501                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12502                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12503                 else
12504                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12505         }
12506
12507         /* 5700 {AX,BX} chips have a broken status block link
12508          * change bit implementation, so we must use the
12509          * status register in those cases.
12510          */
12511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12512                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12513         else
12514                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12515
12516         /* The led_ctrl is set during tg3_phy_probe, here we might
12517          * have to force the link status polling mechanism based
12518          * upon subsystem IDs.
12519          */
12520         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12522             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12523                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12524                                   TG3_FLAG_USE_LINKCHG_REG);
12525         }
12526
12527         /* For all SERDES we poll the MAC status register. */
12528         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12529                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12530         else
12531                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12532
12533         /* All chips before 5787 can get confused if TX buffers
12534          * straddle the 4GB address boundary in some cases.
12535          */
12536         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12537             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12538             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12539             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12540             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12541             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12542                 tp->dev->hard_start_xmit = tg3_start_xmit;
12543         else
12544                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12545
12546         tp->rx_offset = 2;
12547         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12548             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12549                 tp->rx_offset = 0;
12550
12551         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12552
12553         /* Increment the rx prod index on the rx std ring by at most
12554          * 8 for these chips to workaround hw errata.
12555          */
12556         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12557             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12558             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12559                 tp->rx_std_max_post = 8;
12560
12561         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12562                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12563                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12564
12565         return err;
12566 }
12567
12568 #ifdef CONFIG_SPARC
12569 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12570 {
12571         struct net_device *dev = tp->dev;
12572         struct pci_dev *pdev = tp->pdev;
12573         struct device_node *dp = pci_device_to_OF_node(pdev);
12574         const unsigned char *addr;
12575         int len;
12576
12577         addr = of_get_property(dp, "local-mac-address", &len);
12578         if (addr && len == 6) {
12579                 memcpy(dev->dev_addr, addr, 6);
12580                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12581                 return 0;
12582         }
12583         return -ENODEV;
12584 }
12585
12586 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12587 {
12588         struct net_device *dev = tp->dev;
12589
12590         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12591         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12592         return 0;
12593 }
12594 #endif
12595
/* Determine the device MAC address, trying sources in order of
 * preference: OpenFirmware (SPARC only), the bootcode-filled SRAM
 * mailbox, NVRAM, and finally the live MAC address registers.  On
 * success the address is stored in dev->dev_addr and mirrored into
 * dev->perm_addr; returns 0.  Returns -EINVAL if no source yielded a
 * valid address.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC parts: the second function's address lives at a
		 * different NVRAM offset.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* Make sure the NVRAM interface is usable before the reads
		 * below; if the arbitration lock cannot be taken, reset the
		 * NVRAM state machine instead of leaving it wedged.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" — presumably a bootcode signature marking a
	 * valid mailbox entry (NOTE(review): signature meaning inferred
	 * from the constant; confirm against bootcode docs).
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte order of the extraction
		 * differs from the SRAM mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* On SPARC, fall back to the system-wide IDPROM address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12670
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2

/* Fold the appropriate DMA read/write boundary bits into @val (a
 * DMA_RW_CTRL value) based on the host's PCI cache line size, the bus
 * type (PCI / PCI-X / PCI-E), and a per-architecture burst-boundary
 * policy.  Returns @val with the boundary bits set; @val is returned
 * unmodified when the boundary bits have no effect on this chip or
 * when the architecture expresses no preference.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	/* A zero register means the cache line size was never programmed;
	 * assume the largest size so the default switch arms apply.
	 */
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;	/* register is in dwords */

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Per-architecture policy: whether DMA bursts should be confined
	 * to one cache line, several, or left unconstrained.
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: boundary encodings are chip-specific (128/256/384). */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI-E: only the write boundary is controllable. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: boundary tracks the cache line size when
		 * a single-cacheline limit is requested, else falls through
		 * to a larger boundary.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
12810
/* Run one DMA transfer between host buffer @buf (bus address @buf_dma,
 * @size bytes) and on-chip SRAM, using a hand-built internal buffer
 * descriptor pushed through the chip's FTQ machinery.  @to_device
 * selects direction: non-zero = host-to-chip (read DMA engine),
 * zero = chip-to-host (write DMA engine).  Returns 0 when the
 * completion FIFO reports the descriptor, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear completion FIFOs and DMA engine status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the descriptor: host address, on-chip mbuf target, length. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into the SRAM descriptor pool, one dword at a
	 * time through the PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO for up to 40 * 100us = 4ms. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12891
#define TEST_BUFFER_SIZE        0x2000

/* Compute and program tp->dma_rwctrl (DMA read/write control) for the
 * detected bus type and chip, then — on 5700/5701 only — run a
 * write/read-back DMA loopback test to detect the write DMA corruption
 * bug, tightening the write boundary to 16 bytes if corruption is
 * observed.  Returns 0 on success, -ENOMEM if the test buffer cannot
 * be allocated, or -ENODEV if DMA is irreparably broken.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI command codes, then boundary bits per bus/arch. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Chip/bus-specific watermark settings. */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low boundary nibble. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700 and 5701 need the loopback test below. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write pattern to chip, read it back, verify.  Repeats
	 * after tightening the write boundary if corruption is seen.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* Corruption: tighten the write boundary to 16 and
			 * retry once; if it is already at 16, give up.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
13080
13081 static void __devinit tg3_init_link_config(struct tg3 *tp)
13082 {
13083         tp->link_config.advertising =
13084                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13085                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13086                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13087                  ADVERTISED_Autoneg | ADVERTISED_MII);
13088         tp->link_config.speed = SPEED_INVALID;
13089         tp->link_config.duplex = DUPLEX_INVALID;
13090         tp->link_config.autoneg = AUTONEG_ENABLE;
13091         tp->link_config.active_speed = SPEED_INVALID;
13092         tp->link_config.active_duplex = DUPLEX_INVALID;
13093         tp->link_config.phy_is_low_power = 0;
13094         tp->link_config.orig_speed = SPEED_INVALID;
13095         tp->link_config.orig_duplex = DUPLEX_INVALID;
13096         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13097 }
13098
13099 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13100 {
13101         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13102                 tp->bufmgr_config.mbuf_read_dma_low_water =
13103                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13104                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13105                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13106                 tp->bufmgr_config.mbuf_high_water =
13107                         DEFAULT_MB_HIGH_WATER_5705;
13108                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13109                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13110                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13111                         tp->bufmgr_config.mbuf_high_water =
13112                                 DEFAULT_MB_HIGH_WATER_5906;
13113                 }
13114
13115                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13116                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13117                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13118                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13119                 tp->bufmgr_config.mbuf_high_water_jumbo =
13120                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13121         } else {
13122                 tp->bufmgr_config.mbuf_read_dma_low_water =
13123                         DEFAULT_MB_RDMA_LOW_WATER;
13124                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13125                         DEFAULT_MB_MACRX_LOW_WATER;
13126                 tp->bufmgr_config.mbuf_high_water =
13127                         DEFAULT_MB_HIGH_WATER;
13128
13129                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13130                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13131                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13132                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13133                 tp->bufmgr_config.mbuf_high_water_jumbo =
13134                         DEFAULT_MB_HIGH_WATER_JUMBO;
13135         }
13136
13137         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13138         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13139 }
13140
13141 static char * __devinit tg3_phy_string(struct tg3 *tp)
13142 {
13143         switch (tp->phy_id & PHY_ID_MASK) {
13144         case PHY_ID_BCM5400:    return "5400";
13145         case PHY_ID_BCM5401:    return "5401";
13146         case PHY_ID_BCM5411:    return "5411";
13147         case PHY_ID_BCM5701:    return "5701";
13148         case PHY_ID_BCM5703:    return "5703";
13149         case PHY_ID_BCM5704:    return "5704";
13150         case PHY_ID_BCM5705:    return "5705";
13151         case PHY_ID_BCM5750:    return "5750";
13152         case PHY_ID_BCM5752:    return "5752";
13153         case PHY_ID_BCM5714:    return "5714";
13154         case PHY_ID_BCM5780:    return "5780";
13155         case PHY_ID_BCM5755:    return "5755";
13156         case PHY_ID_BCM5787:    return "5787";
13157         case PHY_ID_BCM5784:    return "5784";
13158         case PHY_ID_BCM5756:    return "5722/5756";
13159         case PHY_ID_BCM5906:    return "5906";
13160         case PHY_ID_BCM5761:    return "5761";
13161         case PHY_ID_BCM8002:    return "8002/serdes";
13162         case 0:                 return "serdes";
13163         default:                return "unknown";
13164         }
13165 }
13166
13167 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13168 {
13169         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13170                 strcpy(str, "PCI Express");
13171                 return str;
13172         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13173                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13174
13175                 strcpy(str, "PCIX:");
13176
13177                 if ((clock_ctrl == 7) ||
13178                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13179                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13180                         strcat(str, "133MHz");
13181                 else if (clock_ctrl == 0)
13182                         strcat(str, "33MHz");
13183                 else if (clock_ctrl == 2)
13184                         strcat(str, "50MHz");
13185                 else if (clock_ctrl == 4)
13186                         strcat(str, "66MHz");
13187                 else if (clock_ctrl == 6)
13188                         strcat(str, "100MHz");
13189         } else {
13190                 strcpy(str, "PCI:");
13191                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13192                         strcat(str, "66MHz");
13193                 else
13194                         strcat(str, "33MHz");
13195         }
13196         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13197                 strcat(str, ":32-bit");
13198         else
13199                 strcat(str, ":64-bit");
13200         return str;
13201 }
13202
/* Locate the other PCI function of a dual-port board (e.g. 5704) that
 * shares this device's slot.  Returns tp->pdev itself when the board is
 * configured single-port.  The returned pointer is deliberately NOT
 * reference counted -- see the comment near the bottom.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* Functions of a multi-function device share devfn bits 7:3. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference taken on our own function or on an
		 * absent slot (pci_dev_put(NULL) is a no-op).
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
13230
13231 static void __devinit tg3_init_coal(struct tg3 *tp)
13232 {
13233         struct ethtool_coalesce *ec = &tp->coal;
13234
13235         memset(ec, 0, sizeof(*ec));
13236         ec->cmd = ETHTOOL_GCOALESCE;
13237         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13238         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13239         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13240         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13241         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13242         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13243         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13244         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13245         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13246
13247         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13248                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13249                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13250                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13251                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13252                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13253         }
13254
13255         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13256                 ec->rx_coalesce_usecs_irq = 0;
13257                 ec->tx_coalesce_usecs_irq = 0;
13258                 ec->stats_block_coalesce_usecs = 0;
13259         }
13260 }
13261
/* PCI probe handler.  Enables the device, maps its register BAR (and
 * the APE BAR when present), reads chip invariants, configures DMA
 * masks and feature flags, runs the DMA engine self-test, and finally
 * registers the net_device.  Error paths unwind in reverse order via
 * the goto labels at the bottom.  Returns 0 or a negative errno.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;	/* print banner only once */
	resource_size_t tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map the register BAR. */
	dev->mem_start = pci_resource_start(pdev, BAR_0);
	tg3reg_len = pci_resource_len(pdev, BAR_0);
	dev->mem_end = dev->mem_start + tg3reg_len;

	tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device callbacks. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was rejected. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO chips always get it; a few
	 * older chips (and ASF-enabled ones) cannot do firmware TSO; the
	 * rest can, but with a known bug to work around.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 on a slow bus without TSO gets a smaller RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* APE-equipped chips keep their management firmware registers in
	 * BAR 2; map them and initialize the host/APE locking.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %pM\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       dev->dev_addr);

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
13603
/* PCI remove handler: undoes everything tg3_init_one() set up, in
 * reverse order.  `dev` is NULL if probe never completed for this pdev.
 */
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		/* Make sure no queued tg3_reset_task is still running. */
		flush_scheduled_work();

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
13633
/* Legacy PCI suspend hook.  Quiesces the NIC and programs the wake-up
 * power state; if that fails, the hardware is restarted so the
 * interface stays usable.  Returns 0 or a negative errno.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, the PHY machinery and the data path. */
	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	/* Let the PCI core pick the wake-capable target state when the
	 * device has PM support; otherwise fall back to D3hot.
	 */
	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		/* Power transition failed -- bring the chip back up so
		 * the interface keeps working, then report the error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
13695
/* Legacy PCI resume hook: restore PCI config space, return the chip to
 * D0, reinitialize the hardware and restart the data path.  Returns 0
 * or a negative errno.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	/* Nothing else to do if the interface was down at suspend time. */
	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer stopped by tg3_suspend(). */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
13733
/* PCI driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
13742
/* Module init: register the PCI driver with the core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

/* Module exit: unregister the driver, detaching all bound devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);