1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #define BAR_0   0
58 #define BAR_2   2
59
60 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61 #define TG3_VLAN_TAG_USED 1
62 #else
63 #define TG3_VLAN_TAG_USED 0
64 #endif
65
66 #define TG3_TSO_SUPPORT 1
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.95"
73 #define DRV_MODULE_RELDATE      "November 3, 2008"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself;
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128
129 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
131
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
134
135 #define TG3_RAW_IP_ALIGN 2
136
137 /* number of ETHTOOL_GSTATS u64's */
138 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
139
140 #define TG3_NUM_TEST            6
141
142 static char version[] __devinitdata =
143         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
144
145 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
146 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
147 MODULE_LICENSE("GPL");
148 MODULE_VERSION(DRV_MODULE_VERSION);
149
150 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
151 module_param(tg3_debug, int, 0);
152 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
153
154 static struct pci_device_id tg3_pci_tbl[] = {
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
209         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
210         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
211         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
212         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
213         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
214         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
215         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
216         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
217         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
218         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
219         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
220         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
221         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
222         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
223         {}
224 };
225
226 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
227
228 static const struct {
229         const char string[ETH_GSTRING_LEN];
230 } ethtool_stats_keys[TG3_NUM_STATS] = {
231         { "rx_octets" },
232         { "rx_fragments" },
233         { "rx_ucast_packets" },
234         { "rx_mcast_packets" },
235         { "rx_bcast_packets" },
236         { "rx_fcs_errors" },
237         { "rx_align_errors" },
238         { "rx_xon_pause_rcvd" },
239         { "rx_xoff_pause_rcvd" },
240         { "rx_mac_ctrl_rcvd" },
241         { "rx_xoff_entered" },
242         { "rx_frame_too_long_errors" },
243         { "rx_jabbers" },
244         { "rx_undersize_packets" },
245         { "rx_in_length_errors" },
246         { "rx_out_length_errors" },
247         { "rx_64_or_less_octet_packets" },
248         { "rx_65_to_127_octet_packets" },
249         { "rx_128_to_255_octet_packets" },
250         { "rx_256_to_511_octet_packets" },
251         { "rx_512_to_1023_octet_packets" },
252         { "rx_1024_to_1522_octet_packets" },
253         { "rx_1523_to_2047_octet_packets" },
254         { "rx_2048_to_4095_octet_packets" },
255         { "rx_4096_to_8191_octet_packets" },
256         { "rx_8192_to_9022_octet_packets" },
257
258         { "tx_octets" },
259         { "tx_collisions" },
260
261         { "tx_xon_sent" },
262         { "tx_xoff_sent" },
263         { "tx_flow_control" },
264         { "tx_mac_errors" },
265         { "tx_single_collisions" },
266         { "tx_mult_collisions" },
267         { "tx_deferred" },
268         { "tx_excessive_collisions" },
269         { "tx_late_collisions" },
270         { "tx_collide_2times" },
271         { "tx_collide_3times" },
272         { "tx_collide_4times" },
273         { "tx_collide_5times" },
274         { "tx_collide_6times" },
275         { "tx_collide_7times" },
276         { "tx_collide_8times" },
277         { "tx_collide_9times" },
278         { "tx_collide_10times" },
279         { "tx_collide_11times" },
280         { "tx_collide_12times" },
281         { "tx_collide_13times" },
282         { "tx_collide_14times" },
283         { "tx_collide_15times" },
284         { "tx_ucast_packets" },
285         { "tx_mcast_packets" },
286         { "tx_bcast_packets" },
287         { "tx_carrier_sense_errors" },
288         { "tx_discards" },
289         { "tx_errors" },
290
291         { "dma_writeq_full" },
292         { "dma_write_prioq_full" },
293         { "rxbds_empty" },
294         { "rx_discards" },
295         { "rx_errors" },
296         { "rx_threshold_hit" },
297
298         { "dma_readq_full" },
299         { "dma_read_prioq_full" },
300         { "tx_comp_queue_full" },
301
302         { "ring_set_send_prod_index" },
303         { "ring_status_update" },
304         { "nic_irqs" },
305         { "nic_avoided_irqs" },
306         { "nic_tx_threshold_hit" }
307 };
308
309 static const struct {
310         const char string[ETH_GSTRING_LEN];
311 } ethtool_test_keys[TG3_NUM_TEST] = {
312         { "nvram test     (online) " },
313         { "link test      (online) " },
314         { "register test  (offline)" },
315         { "memory test    (offline)" },
316         { "loopback test  (offline)" },
317         { "interrupt test (offline)" },
318 };
319
320 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
321 {
322         writel(val, tp->regs + off);
323 }
324
325 static u32 tg3_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->regs + off));
328 }
329
330 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
331 {
332         writel(val, tp->aperegs + off);
333 }
334
335 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
336 {
337         return (readl(tp->aperegs + off));
338 }
339
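/* Write a chip register indirectly through the PCI config-space window
 * (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA), serialized by indirect_lock.
 */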
340 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
341 {
342         unsigned long flags;
343
344         spin_lock_irqsave(&tp->indirect_lock, flags);
345         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
346         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
347         spin_unlock_irqrestore(&tp->indirect_lock, flags);
348 }
349
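/* Write a register and read it back so the posted write is flushed to the chip. */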
350 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
351 {
352         writel(val, tp->regs + off);
353         readl(tp->regs + off);
354 }
355
356 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
357 {
358         unsigned long flags;
359         u32 val;
360
361         spin_lock_irqsave(&tp->indirect_lock, flags);
362         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
363         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
364         spin_unlock_irqrestore(&tp->indirect_lock, flags);
365         return val;
366 }
367
368 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
369 {
370         unsigned long flags;
371
372         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
373                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
374                                        TG3_64BIT_REG_LOW, val);
375                 return;
376         }
377         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
378                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
379                                        TG3_64BIT_REG_LOW, val);
380                 return;
381         }
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
385         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
386         spin_unlock_irqrestore(&tp->indirect_lock, flags);
387
388         /* In indirect mode when disabling interrupts, we also need
389          * to clear the interrupt bit in the GRC local ctrl register.
390          */
391         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
392             (val == 0x1)) {
393                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
394                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
395         }
396 }
397
398 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
399 {
400         unsigned long flags;
401         u32 val;
402
403         spin_lock_irqsave(&tp->indirect_lock, flags);
404         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
405         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
406         spin_unlock_irqrestore(&tp->indirect_lock, flags);
407         return val;
408 }
409
410 /* usec_wait specifies the wait time in usec when writing to certain registers
411  * where it is unsafe to read back the register without some delay.
412  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
413  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
414  */
415 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
416 {
417         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
418             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
419                 /* Non-posted methods */
420                 tp->write32(tp, off, val);
421         else {
422                 /* Posted method */
423                 tg3_write32(tp, off, val);
424                 if (usec_wait)
425                         udelay(usec_wait);
426                 tp->read32(tp, off);
427         }
428         /* Wait again after the read for the posted method to guarantee that
429          * the wait time is met.
430          */
431         if (usec_wait)
432                 udelay(usec_wait);
433 }
434
435 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
436 {
437         tp->write32_mbox(tp, off, val);
438         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
439             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
440                 tp->read32_mbox(tp, off);
441 }
442
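/* TX mailbox write: chips with the TXD mailbox hardware bug get the value
 * written twice, and chips that may reorder mailbox writes get a read-back
 * to flush the write.
 */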
443 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
444 {
445         void __iomem *mbox = tp->regs + off;
446         writel(val, mbox);
447         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
448                 writel(val, mbox);
449         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
450                 readl(mbox);
451 }
452
453 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
454 {
455         return (readl(tp->regs + off + GRCMBOX_BASE));
456 }
457
458 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
459 {
460         writel(val, tp->regs + off + GRCMBOX_BASE);
461 }
462
463 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
464 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
465 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
466 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
467 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
468
469 #define tw32(reg,val)           tp->write32(tp, reg, val)
470 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
471 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
472 #define tr32(reg)               tp->read32(tp, reg)
473
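/* Write a word of NIC on-chip SRAM through the memory window, using either
 * PCI config space or the memory-mapped window registers.
 */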
474 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
475 {
476         unsigned long flags;
477
478         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
479             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
480                 return;
481
482         spin_lock_irqsave(&tp->indirect_lock, flags);
483         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
484                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
485                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
486
487                 /* Always leave this as zero. */
488                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
489         } else {
490                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
491                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
492
493                 /* Always leave this as zero. */
494                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
495         }
496         spin_unlock_irqrestore(&tp->indirect_lock, flags);
497 }
498
499 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
500 {
501         unsigned long flags;
502
503         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
504             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
505                 *val = 0;
506                 return;
507         }
508
509         spin_lock_irqsave(&tp->indirect_lock, flags);
510         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
511                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
512                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
513
514                 /* Always leave this as zero. */
515                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
516         } else {
517                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
518                 *val = tr32(TG3PCI_MEM_WIN_DATA);
519
520                 /* Always leave this as zero. */
521                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
522         }
523         spin_unlock_irqrestore(&tp->indirect_lock, flags);
524 }
525
526 static void tg3_ape_lock_init(struct tg3 *tp)
527 {
528         int i;
529
530         /* Make sure the driver isn't holding any stale locks. */
531         for (i = 0; i < 8; i++)
532                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
533                                 APE_LOCK_GRANT_DRIVER);
534 }
535
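/* Acquire the requested APE lock (GRC or MEM) on behalf of the driver,
 * polling the grant register for up to 1 millisecond.  Returns -EBUSY if
 * the lock cannot be acquired.
 */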
536 static int tg3_ape_lock(struct tg3 *tp, int locknum)
537 {
538         int i, off;
539         int ret = 0;
540         u32 status;
541
542         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
543                 return 0;
544
545         switch (locknum) {
546                 case TG3_APE_LOCK_GRC:
547                 case TG3_APE_LOCK_MEM:
548                         break;
549                 default:
550                         return -EINVAL;
551         }
552
553         off = 4 * locknum;
554
555         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
556
557         /* Wait for up to 1 millisecond to acquire lock. */
558         for (i = 0; i < 100; i++) {
559                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
560                 if (status == APE_LOCK_GRANT_DRIVER)
561                         break;
562                 udelay(10);
563         }
564
565         if (status != APE_LOCK_GRANT_DRIVER) {
566                 /* Revoke the lock request. */
567                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
568                                 APE_LOCK_GRANT_DRIVER);
569
570                 ret = -EBUSY;
571         }
572
573         return ret;
574 }
575
576 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
577 {
578         int off;
579
580         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
581                 return;
582
583         switch (locknum) {
584                 case TG3_APE_LOCK_GRC:
585                 case TG3_APE_LOCK_MEM:
586                         break;
587                 default:
588                         return;
589         }
590
591         off = 4 * locknum;
592         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
593 }
594
595 static void tg3_disable_ints(struct tg3 *tp)
596 {
597         tw32(TG3PCI_MISC_HOST_CTRL,
598              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
599         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
600 }
601
602 static inline void tg3_cond_int(struct tg3 *tp)
603 {
604         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
605             (tp->hw_status->status & SD_STATUS_UPDATED))
606                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
607         else
608                 tw32(HOSTCC_MODE, tp->coalesce_mode |
609                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
610 }
611
612 static void tg3_enable_ints(struct tg3 *tp)
613 {
614         tp->irq_sync = 0;
615         wmb();
616
617         tw32(TG3PCI_MISC_HOST_CTRL,
618              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
619         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
620                        (tp->last_tag << 24));
621         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
622                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
623                                (tp->last_tag << 24));
624         tg3_cond_int(tp);
625 }
626
627 static inline unsigned int tg3_has_work(struct tg3 *tp)
628 {
629         struct tg3_hw_status *sblk = tp->hw_status;
630         unsigned int work_exists = 0;
631
632         /* check for phy events */
633         if (!(tp->tg3_flags &
634               (TG3_FLAG_USE_LINKCHG_REG |
635                TG3_FLAG_POLL_SERDES))) {
636                 if (sblk->status & SD_STATUS_LINK_CHG)
637                         work_exists = 1;
638         }
639         /* check for RX/TX work to do */
640         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
641             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
642                 work_exists = 1;
643
644         return work_exists;
645 }
646
647 /* tg3_restart_ints
648  *  similar to tg3_enable_ints, but it accurately determines whether there
649  *  is new work pending and can return without flushing the PIO write
650  *  which reenables interrupts
651  */
652 static void tg3_restart_ints(struct tg3 *tp)
653 {
654         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
655                      tp->last_tag << 24);
656         mmiowb();
657
658         /* When doing tagged status, this work check is unnecessary.
659          * The last_tag we write above tells the chip which piece of
660          * work we've completed.
661          */
662         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
663             tg3_has_work(tp))
664                 tw32(HOSTCC_MODE, tp->coalesce_mode |
665                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
666 }
667
668 static inline void tg3_netif_stop(struct tg3 *tp)
669 {
670         tp->dev->trans_start = jiffies; /* prevent tx timeout */
671         napi_disable(&tp->napi);
672         netif_tx_disable(tp->dev);
673 }
674
675 static inline void tg3_netif_start(struct tg3 *tp)
676 {
677         netif_wake_queue(tp->dev);
678         /* NOTE: unconditional netif_wake_queue is only appropriate
679          * so long as all callers are assured to have free tx slots
680          * (such as after tg3_init_hw)
681          */
682         napi_enable(&tp->napi);
683         tp->hw_status->status |= SD_STATUS_UPDATED;
684         tg3_enable_ints(tp);
685 }
686
687 static void tg3_switch_clocks(struct tg3 *tp)
688 {
689         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
690         u32 orig_clock_ctrl;
691
692         if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
693             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
694                 return;
695
696         orig_clock_ctrl = clock_ctrl;
697         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
698                        CLOCK_CTRL_CLKRUN_OENABLE |
699                        0x1f);
700         tp->pci_clock_ctrl = clock_ctrl;
701
702         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
703                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
704                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
705                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
706                 }
707         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
708                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
709                             clock_ctrl |
710                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
711                             40);
712                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
713                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
714                             40);
715         }
716         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
717 }
718
719 #define PHY_BUSY_LOOPS  5000
720
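/* Read a PHY register by issuing an MDIO read frame through MAC_MI_COM and
 * polling for completion.  MI auto-polling is disabled for the duration of
 * the access and restored afterwards.
 */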
721 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
722 {
723         u32 frame_val;
724         unsigned int loops;
725         int ret;
726
727         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
728                 tw32_f(MAC_MI_MODE,
729                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
730                 udelay(80);
731         }
732
733         *val = 0x0;
734
735         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
736                       MI_COM_PHY_ADDR_MASK);
737         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
738                       MI_COM_REG_ADDR_MASK);
739         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
740
741         tw32_f(MAC_MI_COM, frame_val);
742
743         loops = PHY_BUSY_LOOPS;
744         while (loops != 0) {
745                 udelay(10);
746                 frame_val = tr32(MAC_MI_COM);
747
748                 if ((frame_val & MI_COM_BUSY) == 0) {
749                         udelay(5);
750                         frame_val = tr32(MAC_MI_COM);
751                         break;
752                 }
753                 loops -= 1;
754         }
755
756         ret = -EBUSY;
757         if (loops != 0) {
758                 *val = frame_val & MI_COM_DATA_MASK;
759                 ret = 0;
760         }
761
762         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
763                 tw32_f(MAC_MI_MODE, tp->mi_mode);
764                 udelay(80);
765         }
766
767         return ret;
768 }
769
770 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
771 {
772         u32 frame_val;
773         unsigned int loops;
774         int ret;
775
776         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
777             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
778                 return 0;
779
780         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
781                 tw32_f(MAC_MI_MODE,
782                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
783                 udelay(80);
784         }
785
786         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
787                       MI_COM_PHY_ADDR_MASK);
788         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
789                       MI_COM_REG_ADDR_MASK);
790         frame_val |= (val & MI_COM_DATA_MASK);
791         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
792
793         tw32_f(MAC_MI_COM, frame_val);
794
795         loops = PHY_BUSY_LOOPS;
796         while (loops != 0) {
797                 udelay(10);
798                 frame_val = tr32(MAC_MI_COM);
799                 if ((frame_val & MI_COM_BUSY) == 0) {
800                         udelay(5);
801                         frame_val = tr32(MAC_MI_COM);
802                         break;
803                 }
804                 loops -= 1;
805         }
806
807         ret = -EBUSY;
808         if (loops != 0)
809                 ret = 0;
810
811         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
812                 tw32_f(MAC_MI_MODE, tp->mi_mode);
813                 udelay(80);
814         }
815
816         return ret;
817 }
818
819 static int tg3_bmcr_reset(struct tg3 *tp)
820 {
821         u32 phy_control;
822         int limit, err;
823
824         /* OK, reset it, and poll the BMCR_RESET bit until it
825          * clears or we time out.
826          */
827         phy_control = BMCR_RESET;
828         err = tg3_writephy(tp, MII_BMCR, phy_control);
829         if (err != 0)
830                 return -EBUSY;
831
832         limit = 5000;
833         while (limit--) {
834                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
835                 if (err != 0)
836                         return -EBUSY;
837
838                 if ((phy_control & BMCR_RESET) == 0) {
839                         udelay(40);
840                         break;
841                 }
842                 udelay(10);
843         }
844         if (limit <= 0)
845                 return -EBUSY;
846
847         return 0;
848 }
849
850 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
851 {
852         struct tg3 *tp = (struct tg3 *)bp->priv;
853         u32 val;
854
855         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
856                 return -EAGAIN;
857
858         if (tg3_readphy(tp, reg, &val))
859                 return -EIO;
860
861         return val;
862 }
863
864 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
865 {
866         struct tg3 *tp = (struct tg3 *)bp->priv;
867
868         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
869                 return -EAGAIN;
870
871         if (tg3_writephy(tp, reg, val))
872                 return -EIO;
873
874         return 0;
875 }
876
877 static int tg3_mdio_reset(struct mii_bus *bp)
878 {
879         return 0;
880 }
881
882 static void tg3_mdio_config_5785(struct tg3 *tp)
883 {
884         u32 val;
885         struct phy_device *phydev;
886
887         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
888         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
889         case TG3_PHY_ID_BCM50610:
890                 val = MAC_PHYCFG2_50610_LED_MODES;
891                 break;
892         case TG3_PHY_ID_BCMAC131:
893                 val = MAC_PHYCFG2_AC131_LED_MODES;
894                 break;
895         case TG3_PHY_ID_RTL8211C:
896                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
897                 break;
898         case TG3_PHY_ID_RTL8201E:
899                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
900                 break;
901         default:
902                 return;
903         }
904
905         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
906                 tw32(MAC_PHYCFG2, val);
907
908                 val = tr32(MAC_PHYCFG1);
909                 val &= ~MAC_PHYCFG1_RGMII_INT;
910                 tw32(MAC_PHYCFG1, val);
911
912                 return;
913         }
914
915         if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
916                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
917                        MAC_PHYCFG2_FMODE_MASK_MASK |
918                        MAC_PHYCFG2_GMODE_MASK_MASK |
919                        MAC_PHYCFG2_ACT_MASK_MASK   |
920                        MAC_PHYCFG2_QUAL_MASK_MASK |
921                        MAC_PHYCFG2_INBAND_ENABLE;
922
923         tw32(MAC_PHYCFG2, val);
924
925         val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
926                                     MAC_PHYCFG1_RGMII_SND_STAT_EN);
927         if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
928                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
929                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
930                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
931                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
932         }
933         tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
934
935         val = tr32(MAC_EXT_RGMII_MODE);
936         val &= ~(MAC_RGMII_MODE_RX_INT_B |
937                  MAC_RGMII_MODE_RX_QUALITY |
938                  MAC_RGMII_MODE_RX_ACTIVITY |
939                  MAC_RGMII_MODE_RX_ENG_DET |
940                  MAC_RGMII_MODE_TX_ENABLE |
941                  MAC_RGMII_MODE_TX_LOWPWR |
942                  MAC_RGMII_MODE_TX_RESET);
943         if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
944                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
945                         val |= MAC_RGMII_MODE_RX_INT_B |
946                                MAC_RGMII_MODE_RX_QUALITY |
947                                MAC_RGMII_MODE_RX_ACTIVITY |
948                                MAC_RGMII_MODE_RX_ENG_DET;
949                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
950                         val |= MAC_RGMII_MODE_TX_ENABLE |
951                                MAC_RGMII_MODE_TX_LOWPWR |
952                                MAC_RGMII_MODE_TX_RESET;
953         }
954         tw32(MAC_EXT_RGMII_MODE, val);
955 }
956
957 static void tg3_mdio_start(struct tg3 *tp)
958 {
959         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
960                 mutex_lock(&tp->mdio_bus->mdio_lock);
961                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
962                 mutex_unlock(&tp->mdio_bus->mdio_lock);
963         }
964
965         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
966         tw32_f(MAC_MI_MODE, tp->mi_mode);
967         udelay(80);
968
969         if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
970             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
971                 tg3_mdio_config_5785(tp);
972 }
973
974 static void tg3_mdio_stop(struct tg3 *tp)
975 {
976         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
977                 mutex_lock(&tp->mdio_bus->mdio_lock);
978                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
979                 mutex_unlock(&tp->mdio_bus->mdio_lock);
980         }
981 }
982
983 static int tg3_mdio_init(struct tg3 *tp)
984 {
985         int i;
986         u32 reg;
987         struct phy_device *phydev;
988
989         tg3_mdio_start(tp);
990
991         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
992             (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
993                 return 0;
994
995         tp->mdio_bus = mdiobus_alloc();
996         if (tp->mdio_bus == NULL)
997                 return -ENOMEM;
998
999         tp->mdio_bus->name     = "tg3 mdio bus";
1000         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1001                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1002         tp->mdio_bus->priv     = tp;
1003         tp->mdio_bus->parent   = &tp->pdev->dev;
1004         tp->mdio_bus->read     = &tg3_mdio_read;
1005         tp->mdio_bus->write    = &tg3_mdio_write;
1006         tp->mdio_bus->reset    = &tg3_mdio_reset;
1007         tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
1008         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1009
1010         for (i = 0; i < PHY_MAX_ADDR; i++)
1011                 tp->mdio_bus->irq[i] = PHY_POLL;
1012
1013         /* The bus registration will look for all the PHYs on the mdio bus.
1014          * Unfortunately, it does not ensure the PHY is powered up before
1015          * accessing the PHY ID registers.  A chip reset is the
1016          * quickest way to bring the device back to an operational state.
1017          */
1018         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1019                 tg3_bmcr_reset(tp);
1020
1021         i = mdiobus_register(tp->mdio_bus);
1022         if (i) {
1023                 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1024                         tp->dev->name, i);
1025                 mdiobus_free(tp->mdio_bus);
1026                 return i;
1027         }
1028
1029         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1030
1031         if (!phydev || !phydev->drv) {
1032                 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1033                 mdiobus_unregister(tp->mdio_bus);
1034                 mdiobus_free(tp->mdio_bus);
1035                 return -ENODEV;
1036         }
1037
1038         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1039         case TG3_PHY_ID_BCM50610:
1040                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1041                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1042                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1043                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1044                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1045                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1046                 /* fallthru */
1047         case TG3_PHY_ID_RTL8211C:
1048                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1049                 break;
1050         case TG3_PHY_ID_RTL8201E:
1051         case TG3_PHY_ID_BCMAC131:
1052                 phydev->interface = PHY_INTERFACE_MODE_MII;
1053                 break;
1054         }
1055
1056         tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1057
1058         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1059                 tg3_mdio_config_5785(tp);
1060
1061         return 0;
1062 }
1063
1064 static void tg3_mdio_fini(struct tg3 *tp)
1065 {
1066         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1067                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1068                 mdiobus_unregister(tp->mdio_bus);
1069                 mdiobus_free(tp->mdio_bus);
1070                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1071         }
1072 }
1073
1074 /* tp->lock is held. */
1075 static inline void tg3_generate_fw_event(struct tg3 *tp)
1076 {
1077         u32 val;
1078
1079         val = tr32(GRC_RX_CPU_EVENT);
1080         val |= GRC_RX_CPU_DRIVER_EVENT;
1081         tw32_f(GRC_RX_CPU_EVENT, val);
1082
1083         tp->last_event_jiffies = jiffies;
1084 }
1085
1086 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1087
1088 /* tp->lock is held. */
1089 static void tg3_wait_for_event_ack(struct tg3 *tp)
1090 {
1091         int i;
1092         unsigned int delay_cnt;
1093         long time_remain;
1094
1095         /* If enough time has passed, no wait is necessary. */
1096         time_remain = (long)(tp->last_event_jiffies + 1 +
1097                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1098                       (long)jiffies;
1099         if (time_remain < 0)
1100                 return;
1101
1102         /* Check if we can shorten the wait time. */
1103         delay_cnt = jiffies_to_usecs(time_remain);
1104         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1105                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1106         delay_cnt = (delay_cnt >> 3) + 1;
1107
1108         for (i = 0; i < delay_cnt; i++) {
1109                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1110                         break;
1111                 udelay(8);
1112         }
1113 }
1114
1115 /* tp->lock is held. */
1116 static void tg3_ump_link_report(struct tg3 *tp)
1117 {
1118         u32 reg;
1119         u32 val;
1120
1121         if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1122             !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
1123                 return;
1124
1125         tg3_wait_for_event_ack(tp);
1126
1127         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1128
1129         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1130
1131         val = 0;
1132         if (!tg3_readphy(tp, MII_BMCR, &reg))
1133                 val = reg << 16;
1134         if (!tg3_readphy(tp, MII_BMSR, &reg))
1135                 val |= (reg & 0xffff);
1136         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1137
1138         val = 0;
1139         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1140                 val = reg << 16;
1141         if (!tg3_readphy(tp, MII_LPA, &reg))
1142                 val |= (reg & 0xffff);
1143         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1144
1145         val = 0;
1146         if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1147                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1148                         val = reg << 16;
1149                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1150                         val |= (reg & 0xffff);
1151         }
1152         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1153
1154         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1155                 val = reg << 16;
1156         else
1157                 val = 0;
1158         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1159
1160         tg3_generate_fw_event(tp);
1161 }
1162
1163 static void tg3_link_report(struct tg3 *tp)
1164 {
1165         if (!netif_carrier_ok(tp->dev)) {
1166                 if (netif_msg_link(tp))
1167                         printk(KERN_INFO PFX "%s: Link is down.\n",
1168                                tp->dev->name);
1169                 tg3_ump_link_report(tp);
1170         } else if (netif_msg_link(tp)) {
1171                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1172                        tp->dev->name,
1173                        (tp->link_config.active_speed == SPEED_1000 ?
1174                         1000 :
1175                         (tp->link_config.active_speed == SPEED_100 ?
1176                          100 : 10)),
1177                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1178                         "full" : "half"));
1179
1180                 printk(KERN_INFO PFX
1181                        "%s: Flow control is %s for TX and %s for RX.\n",
1182                        tp->dev->name,
1183                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1184                        "on" : "off",
1185                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1186                        "on" : "off");
1187                 tg3_ump_link_report(tp);
1188         }
1189 }
1190
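/* Convert the driver's TX/RX flow-control flags into the MII pause
 * advertisement bits (ADVERTISE_PAUSE_CAP/ASYM) for copper links.
 */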
1191 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1192 {
1193         u16 miireg;
1194
1195         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1196                 miireg = ADVERTISE_PAUSE_CAP;
1197         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1198                 miireg = ADVERTISE_PAUSE_ASYM;
1199         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1200                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1201         else
1202                 miireg = 0;
1203
1204         return miireg;
1205 }
1206
1207 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1208 {
1209         u16 miireg;
1210
1211         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1212                 miireg = ADVERTISE_1000XPAUSE;
1213         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1214                 miireg = ADVERTISE_1000XPSE_ASYM;
1215         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1216                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1217         else
1218                 miireg = 0;
1219
1220         return miireg;
1221 }
1222
1223 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1224 {
1225         u8 cap = 0;
1226
1227         if (lcladv & ADVERTISE_PAUSE_CAP) {
1228                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1229                         if (rmtadv & LPA_PAUSE_CAP)
1230                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1231                         else if (rmtadv & LPA_PAUSE_ASYM)
1232                                 cap = TG3_FLOW_CTRL_RX;
1233                 } else {
1234                         if (rmtadv & LPA_PAUSE_CAP)
1235                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1236                 }
1237         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1238                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1239                         cap = TG3_FLOW_CTRL_TX;
1240         }
1241
1242         return cap;
1243 }
1244
1245 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1246 {
1247         u8 cap = 0;
1248
1249         if (lcladv & ADVERTISE_1000XPAUSE) {
1250                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1251                         if (rmtadv & LPA_1000XPAUSE)
1252                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1253                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1254                                 cap = TG3_FLOW_CTRL_RX;
1255                 } else {
1256                         if (rmtadv & LPA_1000XPAUSE)
1257                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1258                 }
1259         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1260                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1261                         cap = TG3_FLOW_CTRL_TX;
1262         }
1263
1264         return cap;
1265 }
1266
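/* Resolve the negotiated (or forced) pause configuration and program the
 * MAC RX/TX flow-control enables accordingly.
 */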
1267 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1268 {
1269         u8 autoneg;
1270         u8 flowctrl = 0;
1271         u32 old_rx_mode = tp->rx_mode;
1272         u32 old_tx_mode = tp->tx_mode;
1273
1274         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1275                 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
1276         else
1277                 autoneg = tp->link_config.autoneg;
1278
1279         if (autoneg == AUTONEG_ENABLE &&
1280             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1281                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1282                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1283                 else
1284                         flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1285         } else
1286                 flowctrl = tp->link_config.flowctrl;
1287
1288         tp->link_config.active_flowctrl = flowctrl;
1289
1290         if (flowctrl & TG3_FLOW_CTRL_RX)
1291                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1292         else
1293                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1294
1295         if (old_rx_mode != tp->rx_mode)
1296                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1297
1298         if (flowctrl & TG3_FLOW_CTRL_TX)
1299                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1300         else
1301                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1302
1303         if (old_tx_mode != tp->tx_mode)
1304                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1305 }
1306
1307 static void tg3_adjust_link(struct net_device *dev)
1308 {
1309         u8 oldflowctrl, linkmesg = 0;
1310         u32 mac_mode, lcl_adv, rmt_adv;
1311         struct tg3 *tp = netdev_priv(dev);
1312         struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1313
1314         spin_lock(&tp->lock);
1315
1316         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1317                                     MAC_MODE_HALF_DUPLEX);
1318
1319         oldflowctrl = tp->link_config.active_flowctrl;
1320
1321         if (phydev->link) {
1322                 lcl_adv = 0;
1323                 rmt_adv = 0;
1324
1325                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1326                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1327                 else
1328                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1329
1330                 if (phydev->duplex == DUPLEX_HALF)
1331                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1332                 else {
1333                         lcl_adv = tg3_advert_flowctrl_1000T(
1334                                   tp->link_config.flowctrl);
1335
1336                         if (phydev->pause)
1337                                 rmt_adv = LPA_PAUSE_CAP;
1338                         if (phydev->asym_pause)
1339                                 rmt_adv |= LPA_PAUSE_ASYM;
1340                 }
1341
1342                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1343         } else
1344                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1345
1346         if (mac_mode != tp->mac_mode) {
1347                 tp->mac_mode = mac_mode;
1348                 tw32_f(MAC_MODE, tp->mac_mode);
1349                 udelay(40);
1350         }
1351
1352         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1353                 if (phydev->speed == SPEED_10)
1354                         tw32(MAC_MI_STAT,
1355                              MAC_MI_STAT_10MBPS_MODE |
1356                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1357                 else
1358                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1359         }
1360
1361         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1362                 tw32(MAC_TX_LENGTHS,
1363                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1364                       (6 << TX_LENGTHS_IPG_SHIFT) |
1365                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1366         else
1367                 tw32(MAC_TX_LENGTHS,
1368                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1369                       (6 << TX_LENGTHS_IPG_SHIFT) |
1370                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1371
1372         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1373             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1374             phydev->speed != tp->link_config.active_speed ||
1375             phydev->duplex != tp->link_config.active_duplex ||
1376             oldflowctrl != tp->link_config.active_flowctrl)
1377                 linkmesg = 1;
1378
1379         tp->link_config.active_speed = phydev->speed;
1380         tp->link_config.active_duplex = phydev->duplex;
1381
1382         spin_unlock(&tp->lock);
1383
1384         if (linkmesg)
1385                 tg3_link_report(tp);
1386 }
1387
1388 static int tg3_phy_init(struct tg3 *tp)
1389 {
1390         struct phy_device *phydev;
1391
1392         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1393                 return 0;
1394
1395         /* Bring the PHY back to a known state. */
1396         tg3_bmcr_reset(tp);
1397
1398         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1399
1400         /* Attach the MAC to the PHY. */
1401         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1402                              phydev->dev_flags, phydev->interface);
1403         if (IS_ERR(phydev)) {
1404                 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1405                 return PTR_ERR(phydev);
1406         }
1407
1408         /* Mask with MAC supported features. */
1409         switch (phydev->interface) {
1410         case PHY_INTERFACE_MODE_GMII:
1411         case PHY_INTERFACE_MODE_RGMII:
1412                 phydev->supported &= (PHY_GBIT_FEATURES |
1413                                       SUPPORTED_Pause |
1414                                       SUPPORTED_Asym_Pause);
1415                 break;
1416         case PHY_INTERFACE_MODE_MII:
1417                 phydev->supported &= (PHY_BASIC_FEATURES |
1418                                       SUPPORTED_Pause |
1419                                       SUPPORTED_Asym_Pause);
1420                 break;
1421         default:
1422                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1423                 return -EINVAL;
1424         }
1425
1426         tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1427
1428         phydev->advertising = phydev->supported;
1429
1430         return 0;
1431 }
1432
1433 static void tg3_phy_start(struct tg3 *tp)
1434 {
1435         struct phy_device *phydev;
1436
1437         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1438                 return;
1439
1440         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1441
1442         if (tp->link_config.phy_is_low_power) {
1443                 tp->link_config.phy_is_low_power = 0;
1444                 phydev->speed = tp->link_config.orig_speed;
1445                 phydev->duplex = tp->link_config.orig_duplex;
1446                 phydev->autoneg = tp->link_config.orig_autoneg;
1447                 phydev->advertising = tp->link_config.orig_advertising;
1448         }
1449
1450         phy_start(phydev);
1451
1452         phy_start_aneg(phydev);
1453 }
1454
1455 static void tg3_phy_stop(struct tg3 *tp)
1456 {
1457         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1458                 return;
1459
1460         phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1461 }
1462
1463 static void tg3_phy_fini(struct tg3 *tp)
1464 {
1465         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1466                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1467                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1468         }
1469 }
1470
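/* Write @val to the PHY DSP register selected by @reg (address
 * register first, then the read/write data port).
 */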
1471 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1472 {
1473         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1474         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1475 }
1476
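/* Enable or disable automatic MDI/MDI-X crossover on copper PHYs.
 * The 5906 uses the EPHY shadow MISCCTRL register; other 5705+
 * chips use the AUX_CTRL miscellaneous shadow register.
 */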
1477 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1478 {
1479         u32 phy;
1480
1481         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1482             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1483                 return;
1484
1485         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1486                 u32 ephy;
1487
1488                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1489                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1490                                      ephy | MII_TG3_EPHY_SHADOW_EN);
1491                         if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1492                                 if (enable)
1493                                         phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1494                                 else
1495                                         phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1496                                 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1497                         }
1498                         tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1499                 }
1500         } else {
1501                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1502                       MII_TG3_AUXCTL_SHDWSEL_MISC;
1503                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1504                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1505                         if (enable)
1506                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1507                         else
1508                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1509                         phy |= MII_TG3_AUXCTL_MISC_WREN;
1510                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1511                 }
1512         }
1513 }
1514
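/* Enable the PHY's ethernet@wirespeed feature via AUX_CTRL shadow
 * register 7, unless it is disabled for this board.
 */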
1515 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1516 {
1517         u32 val;
1518
1519         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1520                 return;
1521
1522         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1523             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1524                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1525                              (val | (1 << 15) | (1 << 4)));
1526 }
1527
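/* Program PHY DSP tuning values (AGC target, HPF, LPF, VDAC, 10BT
 * amplitude and offsets) from the OTP word saved in tp->phy_otp.
 */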
1528 static void tg3_phy_apply_otp(struct tg3 *tp)
1529 {
1530         u32 otp, phy;
1531
1532         if (!tp->phy_otp)
1533                 return;
1534
1535         otp = tp->phy_otp;
1536
1537         /* Enable SM_DSP clock and tx 6dB coding. */
1538         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1539               MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1540               MII_TG3_AUXCTL_ACTL_TX_6DB;
1541         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1542
1543         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1544         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1545         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1546
1547         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1548               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1549         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1550
1551         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1552         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1553         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1554
1555         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1556         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1557
1558         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1559         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1560
1561         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1562               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1563         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1564
1565         /* Turn off SM_DSP clock. */
1566         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1567               MII_TG3_AUXCTL_ACTL_TX_6DB;
1568         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1569 }
1570
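/* Poll PHY register 0x16 until the macro-done bit (0x1000) clears;
 * return -EBUSY if it does not clear within 100 reads.
 */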
1571 static int tg3_wait_macro_done(struct tg3 *tp)
1572 {
1573         int limit = 100;
1574
1575         while (limit--) {
1576                 u32 tmp32;
1577
1578                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1579                         if ((tmp32 & 0x1000) == 0)
1580                                 break;
1581                 }
1582         }
1583         if (limit < 0)
1584                 return -EBUSY;
1585
1586         return 0;
1587 }
1588
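/* Write a known test pattern into each of the four DSP channels and
 * read it back.  A timeout requests another PHY reset via @resetp;
 * both timeouts and readback mismatches return -EBUSY.
 */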
1589 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1590 {
1591         static const u32 test_pat[4][6] = {
1592         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1593         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1594         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1595         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1596         };
1597         int chan;
1598
1599         for (chan = 0; chan < 4; chan++) {
1600                 int i;
1601
1602                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1603                              (chan * 0x2000) | 0x0200);
1604                 tg3_writephy(tp, 0x16, 0x0002);
1605
1606                 for (i = 0; i < 6; i++)
1607                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1608                                      test_pat[chan][i]);
1609
1610                 tg3_writephy(tp, 0x16, 0x0202);
1611                 if (tg3_wait_macro_done(tp)) {
1612                         *resetp = 1;
1613                         return -EBUSY;
1614                 }
1615
1616                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1617                              (chan * 0x2000) | 0x0200);
1618                 tg3_writephy(tp, 0x16, 0x0082);
1619                 if (tg3_wait_macro_done(tp)) {
1620                         *resetp = 1;
1621                         return -EBUSY;
1622                 }
1623
1624                 tg3_writephy(tp, 0x16, 0x0802);
1625                 if (tg3_wait_macro_done(tp)) {
1626                         *resetp = 1;
1627                         return -EBUSY;
1628                 }
1629
1630                 for (i = 0; i < 6; i += 2) {
1631                         u32 low, high;
1632
1633                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1634                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1635                             tg3_wait_macro_done(tp)) {
1636                                 *resetp = 1;
1637                                 return -EBUSY;
1638                         }
1639                         low &= 0x7fff;
1640                         high &= 0x000f;
1641                         if (low != test_pat[chan][i] ||
1642                             high != test_pat[chan][i+1]) {
1643                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1644                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1645                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1646
1647                                 return -EBUSY;
1648                         }
1649                 }
1650         }
1651
1652         return 0;
1653 }
1654
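/* Clear the test pattern from all four DSP channels. */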
1655 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1656 {
1657         int chan;
1658
1659         for (chan = 0; chan < 4; chan++) {
1660                 int i;
1661
1662                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1663                              (chan * 0x2000) | 0x0200);
1664                 tg3_writephy(tp, 0x16, 0x0002);
1665                 for (i = 0; i < 6; i++)
1666                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1667                 tg3_writephy(tp, 0x16, 0x0202);
1668                 if (tg3_wait_macro_done(tp))
1669                         return -EBUSY;
1670         }
1671
1672         return 0;
1673 }
1674
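/* DSP-based PHY reset workaround for 5703/5704/5705: retry the BMCR
 * reset and test-pattern check up to 10 times, then restore the
 * transmitter, interrupt and master-mode settings.
 */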
1675 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1676 {
1677         u32 reg32, phy9_orig;
1678         int retries, do_phy_reset, err;
1679
1680         retries = 10;
1681         do_phy_reset = 1;
1682         do {
1683                 if (do_phy_reset) {
1684                         err = tg3_bmcr_reset(tp);
1685                         if (err)
1686                                 return err;
1687                         do_phy_reset = 0;
1688                 }
1689
1690                 /* Disable transmitter and interrupt.  */
1691                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1692                         continue;
1693
1694                 reg32 |= 0x3000;
1695                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1696
1697                 /* Set full-duplex, 1000 Mbps.  */
1698                 tg3_writephy(tp, MII_BMCR,
1699                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1700
1701                 /* Set to master mode.  */
1702                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1703                         continue;
1704
1705                 tg3_writephy(tp, MII_TG3_CTRL,
1706                              (MII_TG3_CTRL_AS_MASTER |
1707                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1708
1709                 /* Enable SM_DSP_CLOCK and 6dB.  */
1710                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1711
1712                 /* Block the PHY control access.  */
1713                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1714                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1715
1716                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1717                 if (!err)
1718                         break;
1719         } while (--retries);
1720
1721         err = tg3_phy_reset_chanpat(tp);
1722         if (err)
1723                 return err;
1724
1725         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1726         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1727
1728         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1729         tg3_writephy(tp, 0x16, 0x0000);
1730
1731         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1732             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1733                 /* Set Extended packet length bit for jumbo frames */
1734                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1735         }
1736         else {
1737                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1738         }
1739
1740         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1741
1742         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1743                 reg32 &= ~0x3000;
1744                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1745         } else if (!err)
1746                 err = -EBUSY;
1747
1748         return err;
1749 }
1750
1751 /* This resets the tigon3 PHY.  Callers invoke it when there is no
1752  * valid link or when a reset is otherwise being forced.
1753  */
1754 static int tg3_phy_reset(struct tg3 *tp)
1755 {
1756         u32 cpmuctrl;
1757         u32 phy_status;
1758         int err;
1759
1760         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1761                 u32 val;
1762
1763                 val = tr32(GRC_MISC_CFG);
1764                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1765                 udelay(40);
1766         }
1767         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1768         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1769         if (err != 0)
1770                 return -EBUSY;
1771
1772         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1773                 netif_carrier_off(tp->dev);
1774                 tg3_link_report(tp);
1775         }
1776
1777         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1778             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1779             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1780                 err = tg3_phy_reset_5703_4_5(tp);
1781                 if (err)
1782                         return err;
1783                 goto out;
1784         }
1785
1786         cpmuctrl = 0;
1787         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1788             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1789                 cpmuctrl = tr32(TG3_CPMU_CTRL);
1790                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1791                         tw32(TG3_CPMU_CTRL,
1792                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1793         }
1794
1795         err = tg3_bmcr_reset(tp);
1796         if (err)
1797                 return err;
1798
1799         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1800                 u32 phy;
1801
1802                 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1803                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1804
1805                 tw32(TG3_CPMU_CTRL, cpmuctrl);
1806         }
1807
1808         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1809             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1810                 u32 val;
1811
1812                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1813                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1814                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1815                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1816                         udelay(40);
1817                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1818                 }
1819
1820                 /* Disable GPHY autopowerdown. */
1821                 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1822                              MII_TG3_MISC_SHDW_WREN |
1823                              MII_TG3_MISC_SHDW_APD_SEL |
1824                              MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1825         }
1826
1827         tg3_phy_apply_otp(tp);
1828
1829 out:
1830         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1831                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1832                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1833                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1834                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1835                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1836                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1837         }
1838         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1839                 tg3_writephy(tp, 0x1c, 0x8d68);
1840                 tg3_writephy(tp, 0x1c, 0x8d68);
1841         }
1842         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1843                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1844                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1845                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1846                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1847                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1848                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1849                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1850                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1851         }
1852         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1853                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1854                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1855                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1856                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1857                         tg3_writephy(tp, MII_TG3_TEST1,
1858                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1859                 } else
1860                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1861                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1862         }
1863         /* Set Extended packet length bit (bit 14) on all chips
1864          * that support jumbo frames. */
1865         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1866                 /* Cannot do read-modify-write on 5401 */
1867                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1868         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1869                 u32 phy_reg;
1870
1871                 /* Set bit 14 with read-modify-write to preserve other bits */
1872                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1873                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1874                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1875         }
1876
1877         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1878          * jumbo frames transmission.
1879          */
1880         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1881                 u32 phy_reg;
1882
1883                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1884                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1885                                      phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1886         }
1887
1888         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1889                 /* adjust output voltage */
1890                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1891         }
1892
1893         tg3_phy_toggle_automdix(tp, 1);
1894         tg3_phy_set_wirespeed(tp);
1895         return 0;
1896 }
1897
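/* Drive the GRC local-control GPIOs that gate auxiliary power.  The
 * GPIO sequence depends on the chip variant and on whether WOL or ASF
 * is enabled on this port or on its peer.
 */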
1898 static void tg3_frob_aux_power(struct tg3 *tp)
1899 {
1900         struct tg3 *tp_peer = tp;
1901
1902         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1903                 return;
1904
1905         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1906             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1907                 struct net_device *dev_peer;
1908
1909                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1910                 /* remove_one() may have been run on the peer. */
1911                 if (!dev_peer)
1912                         tp_peer = tp;
1913                 else
1914                         tp_peer = netdev_priv(dev_peer);
1915         }
1916
1917         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1918             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1919             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1920             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1921                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1922                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1923                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1924                                     (GRC_LCLCTRL_GPIO_OE0 |
1925                                      GRC_LCLCTRL_GPIO_OE1 |
1926                                      GRC_LCLCTRL_GPIO_OE2 |
1927                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1928                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1929                                     100);
1930                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1931                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1932                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1933                                              GRC_LCLCTRL_GPIO_OE1 |
1934                                              GRC_LCLCTRL_GPIO_OE2 |
1935                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
1936                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
1937                                              tp->grc_local_ctrl;
1938                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1939
1940                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1941                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1942
1943                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1944                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1945                 } else {
1946                         u32 no_gpio2;
1947                         u32 grc_local_ctrl = 0;
1948
1949                         if (tp_peer != tp &&
1950                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1951                                 return;
1952
1953                         /* Workaround to prevent overdrawing Amps. */
1954                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1955                             ASIC_REV_5714) {
1956                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1957                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1958                                             grc_local_ctrl, 100);
1959                         }
1960
1961                         /* On 5753 and variants, GPIO2 cannot be used. */
1962                         no_gpio2 = tp->nic_sram_data_cfg &
1963                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1964
1965                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1966                                          GRC_LCLCTRL_GPIO_OE1 |
1967                                          GRC_LCLCTRL_GPIO_OE2 |
1968                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1969                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1970                         if (no_gpio2) {
1971                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1972                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1973                         }
1974                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1975                                                     grc_local_ctrl, 100);
1976
1977                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1978
1979                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1980                                                     grc_local_ctrl, 100);
1981
1982                         if (!no_gpio2) {
1983                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1984                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1985                                             grc_local_ctrl, 100);
1986                         }
1987                 }
1988         } else {
1989                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1990                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1991                         if (tp_peer != tp &&
1992                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1993                                 return;
1994
1995                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1996                                     (GRC_LCLCTRL_GPIO_OE1 |
1997                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1998
1999                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2000                                     GRC_LCLCTRL_GPIO_OE1, 100);
2001
2002                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2003                                     (GRC_LCLCTRL_GPIO_OE1 |
2004                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2005                 }
2006         }
2007 }
2008
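/* Return nonzero if MAC_MODE_LINK_POLARITY should be set for the
 * given link speed, based on the LED mode and PHY type.
 */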
2009 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2010 {
2011         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2012                 return 1;
2013         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2014                 if (speed != SPEED_10)
2015                         return 1;
2016         } else if (speed == SPEED_10)
2017                 return 1;
2018
2019         return 0;
2020 }
2021
2022 static int tg3_setup_phy(struct tg3 *, int);
2023
2024 #define RESET_KIND_SHUTDOWN     0
2025 #define RESET_KIND_INIT         1
2026 #define RESET_KIND_SUSPEND      2
2027
2028 static void tg3_write_sig_post_reset(struct tg3 *, int);
2029 static int tg3_halt_cpu(struct tg3 *, u32);
2030 static int tg3_nvram_lock(struct tg3 *);
2031 static void tg3_nvram_unlock(struct tg3 *);
2032
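/* Power down the PHY.  SERDES devices and chips with power-down bugs
 * get special handling.
 */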
2033 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2034 {
2035         u32 val;
2036
2037         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2038                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2039                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2040                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2041
2042                         sg_dig_ctrl |=
2043                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2044                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2045                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2046                 }
2047                 return;
2048         }
2049
2050         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2051                 tg3_bmcr_reset(tp);
2052                 val = tr32(GRC_MISC_CFG);
2053                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2054                 udelay(40);
2055                 return;
2056         } else if (do_low_power) {
2057                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2058                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2059
2060                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2061                              MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2062                              MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2063                              MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2064                              MII_TG3_AUXCTL_PCTL_VREG_11V);
2065         }
2066
2067         /* The PHY should not be powered down on some chips because
2068          * of bugs.
2069          */
2070         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2071             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2072             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2073              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2074                 return;
2075
2076         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2077             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2078                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2079                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2080                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2081                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2082         }
2083
2084         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2085 }
2086
2087 /* tp->lock is held. */
2088 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2089 {
2090         u32 addr_high, addr_low;
2091         int i;
2092
2093         addr_high = ((tp->dev->dev_addr[0] << 8) |
2094                      tp->dev->dev_addr[1]);
2095         addr_low = ((tp->dev->dev_addr[2] << 24) |
2096                     (tp->dev->dev_addr[3] << 16) |
2097                     (tp->dev->dev_addr[4] <<  8) |
2098                     (tp->dev->dev_addr[5] <<  0));
2099         for (i = 0; i < 4; i++) {
2100                 if (i == 1 && skip_mac_1)
2101                         continue;
2102                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2103                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2104         }
2105
2106         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2107             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2108                 for (i = 0; i < 12; i++) {
2109                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2110                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2111                 }
2112         }
2113
2114         addr_high = (tp->dev->dev_addr[0] +
2115                      tp->dev->dev_addr[1] +
2116                      tp->dev->dev_addr[2] +
2117                      tp->dev->dev_addr[3] +
2118                      tp->dev->dev_addr[4] +
2119                      tp->dev->dev_addr[5]) &
2120                 TX_BACKOFF_SEED_MASK;
2121         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2122 }
2123
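/* Move the device to the requested PCI power state, programming WOL,
 * MAC mode, clocks, PHY power and auxiliary power along the way.
 */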
2124 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2125 {
2126         u32 misc_host_ctrl;
2127         bool device_should_wake, do_low_power;
2128
2129         /* Make sure register accesses (indirect or otherwise)
2130          * will function correctly.
2131          */
2132         pci_write_config_dword(tp->pdev,
2133                                TG3PCI_MISC_HOST_CTRL,
2134                                tp->misc_host_ctrl);
2135
2136         switch (state) {
2137         case PCI_D0:
2138                 pci_enable_wake(tp->pdev, state, false);
2139                 pci_set_power_state(tp->pdev, PCI_D0);
2140
2141                 /* Switch out of Vaux if it is a NIC */
2142                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2143                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2144
2145                 return 0;
2146
2147         case PCI_D1:
2148         case PCI_D2:
2149         case PCI_D3hot:
2150                 break;
2151
2152         default:
2153                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2154                         tp->dev->name, state);
2155                 return -EINVAL;
2156         }
2157         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2158         tw32(TG3PCI_MISC_HOST_CTRL,
2159              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2160
2161         device_should_wake = pci_pme_capable(tp->pdev, state) &&
2162                              device_may_wakeup(&tp->pdev->dev) &&
2163                              (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2164
2165         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2166                 do_low_power = false;
2167                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2168                     !tp->link_config.phy_is_low_power) {
2169                         struct phy_device *phydev;
2170                         u32 phyid, advertising;
2171
2172                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2173
2174                         tp->link_config.phy_is_low_power = 1;
2175
2176                         tp->link_config.orig_speed = phydev->speed;
2177                         tp->link_config.orig_duplex = phydev->duplex;
2178                         tp->link_config.orig_autoneg = phydev->autoneg;
2179                         tp->link_config.orig_advertising = phydev->advertising;
2180
2181                         advertising = ADVERTISED_TP |
2182                                       ADVERTISED_Pause |
2183                                       ADVERTISED_Autoneg |
2184                                       ADVERTISED_10baseT_Half;
2185
2186                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2187                             device_should_wake) {
2188                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2189                                         advertising |=
2190                                                 ADVERTISED_100baseT_Half |
2191                                                 ADVERTISED_100baseT_Full |
2192                                                 ADVERTISED_10baseT_Full;
2193                                 else
2194                                         advertising |= ADVERTISED_10baseT_Full;
2195                         }
2196
2197                         phydev->advertising = advertising;
2198
2199                         phy_start_aneg(phydev);
2200
2201                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2202                         if (phyid != TG3_PHY_ID_BCMAC131) {
2203                                 phyid &= TG3_PHY_OUI_MASK;
2204                                 if (phyid == TG3_PHY_OUI_1 ||
2205                                     phyid == TG3_PHY_OUI_2 ||
2206                                     phyid == TG3_PHY_OUI_3)
2207                                         do_low_power = true;
2208                         }
2209                 }
2210         } else {
2211                 do_low_power = false;
2212
2213                 if (tp->link_config.phy_is_low_power == 0) {
2214                         tp->link_config.phy_is_low_power = 1;
2215                         tp->link_config.orig_speed = tp->link_config.speed;
2216                         tp->link_config.orig_duplex = tp->link_config.duplex;
2217                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2218                 }
2219
2220                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2221                         tp->link_config.speed = SPEED_10;
2222                         tp->link_config.duplex = DUPLEX_HALF;
2223                         tp->link_config.autoneg = AUTONEG_ENABLE;
2224                         tg3_setup_phy(tp, 0);
2225                 }
2226         }
2227
2228         __tg3_set_mac_addr(tp, 0);
2229
2230         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2231                 u32 val;
2232
2233                 val = tr32(GRC_VCPU_EXT_CTRL);
2234                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2235         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2236                 int i;
2237                 u32 val;
2238
2239                 for (i = 0; i < 200; i++) {
2240                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2241                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2242                                 break;
2243                         msleep(1);
2244                 }
2245         }
2246         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2247                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2248                                                      WOL_DRV_STATE_SHUTDOWN |
2249                                                      WOL_DRV_WOL |
2250                                                      WOL_SET_MAGIC_PKT);
2251
2252         if (device_should_wake) {
2253                 u32 mac_mode;
2254
2255                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2256                         if (do_low_power) {
2257                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2258                                 udelay(40);
2259                         }
2260
2261                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2262                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2263                         else
2264                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2265
2266                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2267                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2268                             ASIC_REV_5700) {
2269                                 u32 speed = (tp->tg3_flags &
2270                                              TG3_FLAG_WOL_SPEED_100MB) ?
2271                                              SPEED_100 : SPEED_10;
2272                                 if (tg3_5700_link_polarity(tp, speed))
2273                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2274                                 else
2275                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2276                         }
2277                 } else {
2278                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2279                 }
2280
2281                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2282                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2283
2284                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2285                 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2286                     !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2287                     ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2288                      (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2289                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2290
2291                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2292                         mac_mode |= tp->mac_mode &
2293                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2294                         if (mac_mode & MAC_MODE_APE_TX_EN)
2295                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2296                 }
2297
2298                 tw32_f(MAC_MODE, mac_mode);
2299                 udelay(100);
2300
2301                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2302                 udelay(10);
2303         }
2304
2305         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2306             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2307              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2308                 u32 base_val;
2309
2310                 base_val = tp->pci_clock_ctrl;
2311                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2312                              CLOCK_CTRL_TXCLK_DISABLE);
2313
2314                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2315                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2316         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2317                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2318                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2319                 /* do nothing */
2320         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2321                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2322                 u32 newbits1, newbits2;
2323
2324                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2325                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2326                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2327                                     CLOCK_CTRL_TXCLK_DISABLE |
2328                                     CLOCK_CTRL_ALTCLK);
2329                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2330                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2331                         newbits1 = CLOCK_CTRL_625_CORE;
2332                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2333                 } else {
2334                         newbits1 = CLOCK_CTRL_ALTCLK;
2335                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2336                 }
2337
2338                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2339                             40);
2340
2341                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2342                             40);
2343
2344                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2345                         u32 newbits3;
2346
2347                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2348                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2349                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2350                                             CLOCK_CTRL_TXCLK_DISABLE |
2351                                             CLOCK_CTRL_44MHZ_CORE);
2352                         } else {
2353                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2354                         }
2355
2356                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2357                                     tp->pci_clock_ctrl | newbits3, 40);
2358                 }
2359         }
2360
2361         if (!(device_should_wake) &&
2362             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2363             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2364                 tg3_power_down_phy(tp, do_low_power);
2365
2366         tg3_frob_aux_power(tp);
2367
2368         /* Workaround for unstable PLL clock */
2369         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2370             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2371                 u32 val = tr32(0x7d00);
2372
2373                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2374                 tw32(0x7d00, val);
2375                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2376                         int err;
2377
2378                         err = tg3_nvram_lock(tp);
2379                         tg3_halt_cpu(tp, RX_CPU_BASE);
2380                         if (!err)
2381                                 tg3_nvram_unlock(tp);
2382                 }
2383         }
2384
2385         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2386
2387         if (device_should_wake)
2388                 pci_enable_wake(tp->pdev, state, true);
2389
2390         /* Finally, set the new power state. */
2391         pci_set_power_state(tp->pdev, state);
2392
2393         return 0;
2394 }
2395
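/* Decode the MII_TG3_AUX_STAT speed/duplex field into SPEED_* and
 * DUPLEX_* values.
 */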
2396 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2397 {
2398         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2399         case MII_TG3_AUX_STAT_10HALF:
2400                 *speed = SPEED_10;
2401                 *duplex = DUPLEX_HALF;
2402                 break;
2403
2404         case MII_TG3_AUX_STAT_10FULL:
2405                 *speed = SPEED_10;
2406                 *duplex = DUPLEX_FULL;
2407                 break;
2408
2409         case MII_TG3_AUX_STAT_100HALF:
2410                 *speed = SPEED_100;
2411                 *duplex = DUPLEX_HALF;
2412                 break;
2413
2414         case MII_TG3_AUX_STAT_100FULL:
2415                 *speed = SPEED_100;
2416                 *duplex = DUPLEX_FULL;
2417                 break;
2418
2419         case MII_TG3_AUX_STAT_1000HALF:
2420                 *speed = SPEED_1000;
2421                 *duplex = DUPLEX_HALF;
2422                 break;
2423
2424         case MII_TG3_AUX_STAT_1000FULL:
2425                 *speed = SPEED_1000;
2426                 *duplex = DUPLEX_FULL;
2427                 break;
2428
2429         default:
2430                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2431                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2432                                  SPEED_10;
2433                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2434                                   DUPLEX_HALF;
2435                         break;
2436                 }
2437                 *speed = SPEED_INVALID;
2438                 *duplex = DUPLEX_INVALID;
2439                 break;
2440         }
2441 }
2442
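/* Program the copper PHY advertisement and control registers from
 * link_config, then restart autonegotiation or force the requested
 * speed and duplex.
 */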
2443 static void tg3_phy_copper_begin(struct tg3 *tp)
2444 {
2445         u32 new_adv;
2446         int i;
2447
2448         if (tp->link_config.phy_is_low_power) {
2449                 /* Entering low power mode.  Disable gigabit and
2450                  * 100baseT advertisements.
2451                  */
2452                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2453
2454                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2455                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2456                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2457                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2458
2459                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2460         } else if (tp->link_config.speed == SPEED_INVALID) {
2461                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2462                         tp->link_config.advertising &=
2463                                 ~(ADVERTISED_1000baseT_Half |
2464                                   ADVERTISED_1000baseT_Full);
2465
2466                 new_adv = ADVERTISE_CSMA;
2467                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2468                         new_adv |= ADVERTISE_10HALF;
2469                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2470                         new_adv |= ADVERTISE_10FULL;
2471                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2472                         new_adv |= ADVERTISE_100HALF;
2473                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2474                         new_adv |= ADVERTISE_100FULL;
2475
2476                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2477
2478                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2479
2480                 if (tp->link_config.advertising &
2481                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2482                         new_adv = 0;
2483                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2484                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2485                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2486                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2487                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2488                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2489                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2490                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2491                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2492                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2493                 } else {
2494                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2495                 }
2496         } else {
2497                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2498                 new_adv |= ADVERTISE_CSMA;
2499
2500                 /* Asking for a specific link mode. */
2501                 if (tp->link_config.speed == SPEED_1000) {
2502                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2503
2504                         if (tp->link_config.duplex == DUPLEX_FULL)
2505                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2506                         else
2507                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2508                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2509                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2510                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2511                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2512                 } else {
2513                         if (tp->link_config.speed == SPEED_100) {
2514                                 if (tp->link_config.duplex == DUPLEX_FULL)
2515                                         new_adv |= ADVERTISE_100FULL;
2516                                 else
2517                                         new_adv |= ADVERTISE_100HALF;
2518                         } else {
2519                                 if (tp->link_config.duplex == DUPLEX_FULL)
2520                                         new_adv |= ADVERTISE_10FULL;
2521                                 else
2522                                         new_adv |= ADVERTISE_10HALF;
2523                         }
2524                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2525
2526                         new_adv = 0;
2527                 }
2528
2529                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2530         }
2531
2532         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2533             tp->link_config.speed != SPEED_INVALID) {
2534                 u32 bmcr, orig_bmcr;
2535
2536                 tp->link_config.active_speed = tp->link_config.speed;
2537                 tp->link_config.active_duplex = tp->link_config.duplex;
2538
2539                 bmcr = 0;
2540                 switch (tp->link_config.speed) {
2541                 default:
2542                 case SPEED_10:
2543                         break;
2544
2545                 case SPEED_100:
2546                         bmcr |= BMCR_SPEED100;
2547                         break;
2548
2549                 case SPEED_1000:
2550                         bmcr |= TG3_BMCR_SPEED1000;
2551                         break;
2552                 }
2553
2554                 if (tp->link_config.duplex == DUPLEX_FULL)
2555                         bmcr |= BMCR_FULLDPLX;
2556
2557                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2558                     (bmcr != orig_bmcr)) {
2559                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2560                         for (i = 0; i < 1500; i++) {
2561                                 u32 tmp;
2562
2563                                 udelay(10);
2564                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2565                                     tg3_readphy(tp, MII_BMSR, &tmp))
2566                                         continue;
2567                                 if (!(tmp & BMSR_LSTATUS)) {
2568                                         udelay(40);
2569                                         break;
2570                                 }
2571                         }
2572                         tg3_writephy(tp, MII_BMCR, bmcr);
2573                         udelay(40);
2574                 }
2575         } else {
2576                 tg3_writephy(tp, MII_BMCR,
2577                              BMCR_ANENABLE | BMCR_ANRESTART);
2578         }
2579 }
2580
2581 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2582 {
2583         int err;
2584
2585         /* Turn off tap power management. */
2586         /* Set Extended packet length bit */
2587         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2588
2589         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2590         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2591
2592         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2593         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2594
2595         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2596         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2597
2598         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2599         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2600
2601         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2602         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2603
2604         udelay(40);
2605
2606         return err;
2607 }
2608
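/* Return 1 if the PHY advertisement registers already cover every
 * mode requested in @mask, including 1000BASE-T unless the chip is
 * 10/100 only.
 */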
2609 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2610 {
2611         u32 adv_reg, all_mask = 0;
2612
2613         if (mask & ADVERTISED_10baseT_Half)
2614                 all_mask |= ADVERTISE_10HALF;
2615         if (mask & ADVERTISED_10baseT_Full)
2616                 all_mask |= ADVERTISE_10FULL;
2617         if (mask & ADVERTISED_100baseT_Half)
2618                 all_mask |= ADVERTISE_100HALF;
2619         if (mask & ADVERTISED_100baseT_Full)
2620                 all_mask |= ADVERTISE_100FULL;
2621
2622         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2623                 return 0;
2624
2625         if ((adv_reg & all_mask) != all_mask)
2626                 return 0;
2627         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2628                 u32 tg3_ctrl;
2629
2630                 all_mask = 0;
2631                 if (mask & ADVERTISED_1000baseT_Half)
2632                         all_mask |= ADVERTISE_1000HALF;
2633                 if (mask & ADVERTISED_1000baseT_Full)
2634                         all_mask |= ADVERTISE_1000FULL;
2635
2636                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2637                         return 0;
2638
2639                 if ((tg3_ctrl & all_mask) != all_mask)
2640                         return 0;
2641         }
2642         return 1;
2643 }
2644
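/* Check the advertised pause bits against the requested flow control.
 * A full-duplex link advertising the wrong pause bits returns 0; on a
 * half-duplex link a mismatch is simply corrected for the next
 * negotiation, and 1 is returned.
 */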
2645 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2646 {
2647         u32 curadv, reqadv;
2648
2649         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2650                 return 1;
2651
2652         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2653         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2654
2655         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2656                 if (curadv != reqadv)
2657                         return 0;
2658
2659                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2660                         tg3_readphy(tp, MII_LPA, rmtadv);
2661         } else {
2662                 /* Reprogram the advertisement register, even if it
2663                  * does not affect the current link.  If the link
2664                  * gets renegotiated in the future, we can save an
2665                  * additional renegotiation cycle by advertising
2666                  * it correctly in the first place.
2667                  */
2668                 if (curadv != reqadv) {
2669                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2670                                      ADVERTISE_PAUSE_ASYM);
2671                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2672                 }
2673         }
2674
2675         return 1;
2676 }
2677
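/* Bring up or re-check the link on a copper PHY: clear stale status,
 * apply per-chip PHY workarounds and read back the negotiated speed
 * and duplex.
 */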
2678 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2679 {
2680         int current_link_up;
2681         u32 bmsr, dummy;
2682         u32 lcl_adv, rmt_adv;
2683         u16 current_speed;
2684         u8 current_duplex;
2685         int i, err;
2686
2687         tw32(MAC_EVENT, 0);
2688
2689         tw32_f(MAC_STATUS,
2690              (MAC_STATUS_SYNC_CHANGED |
2691               MAC_STATUS_CFG_CHANGED |
2692               MAC_STATUS_MI_COMPLETION |
2693               MAC_STATUS_LNKSTATE_CHANGED));
2694         udelay(40);
2695
2696         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2697                 tw32_f(MAC_MI_MODE,
2698                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2699                 udelay(80);
2700         }
2701
2702         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2703
2704         /* Some third-party PHYs need to be reset on link going
2705          * down.
2706          */
2707         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2708              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2709              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2710             netif_carrier_ok(tp->dev)) {
2711                 tg3_readphy(tp, MII_BMSR, &bmsr);
2712                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2713                     !(bmsr & BMSR_LSTATUS))
2714                         force_reset = 1;
2715         }
2716         if (force_reset)
2717                 tg3_phy_reset(tp);
2718
2719         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2720                 tg3_readphy(tp, MII_BMSR, &bmsr);
2721                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2722                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2723                         bmsr = 0;
2724
2725                 if (!(bmsr & BMSR_LSTATUS)) {
2726                         err = tg3_init_5401phy_dsp(tp);
2727                         if (err)
2728                                 return err;
2729
2730                         tg3_readphy(tp, MII_BMSR, &bmsr);
2731                         for (i = 0; i < 1000; i++) {
2732                                 udelay(10);
2733                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2734                                     (bmsr & BMSR_LSTATUS)) {
2735                                         udelay(40);
2736                                         break;
2737                                 }
2738                         }
2739
2740                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2741                             !(bmsr & BMSR_LSTATUS) &&
2742                             tp->link_config.active_speed == SPEED_1000) {
2743                                 err = tg3_phy_reset(tp);
2744                                 if (!err)
2745                                         err = tg3_init_5401phy_dsp(tp);
2746                                 if (err)
2747                                         return err;
2748                         }
2749                 }
2750         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2751                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2752                 /* 5701 {A0,B0} CRC bug workaround */
2753                 tg3_writephy(tp, 0x15, 0x0a75);
2754                 tg3_writephy(tp, 0x1c, 0x8c68);
2755                 tg3_writephy(tp, 0x1c, 0x8d68);
2756                 tg3_writephy(tp, 0x1c, 0x8c68);
2757         }
2758
2759         /* Clear pending interrupts... */
2760         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2761         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2762
2763         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2764                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2765         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2766                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2767
2768         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2769             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2770                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2771                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2772                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2773                 else
2774                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2775         }
2776
2777         current_link_up = 0;
2778         current_speed = SPEED_INVALID;
2779         current_duplex = DUPLEX_INVALID;
2780
2781         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2782                 u32 val;
2783
2784                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2785                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2786                 if (!(val & (1 << 10))) {
2787                         val |= (1 << 10);
2788                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2789                         goto relink;
2790                 }
2791         }
2792
2793         bmsr = 0;
2794         for (i = 0; i < 100; i++) {
2795                 tg3_readphy(tp, MII_BMSR, &bmsr);
2796                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2797                     (bmsr & BMSR_LSTATUS))
2798                         break;
2799                 udelay(40);
2800         }
2801
2802         if (bmsr & BMSR_LSTATUS) {
2803                 u32 aux_stat, bmcr;
2804
2805                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2806                 for (i = 0; i < 2000; i++) {
2807                         udelay(10);
2808                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2809                             aux_stat)
2810                                 break;
2811                 }
2812
2813                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2814                                              &current_speed,
2815                                              &current_duplex);
2816
2817                 bmcr = 0;
2818                 for (i = 0; i < 200; i++) {
2819                         tg3_readphy(tp, MII_BMCR, &bmcr);
2820                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2821                                 continue;
2822                         if (bmcr && bmcr != 0x7fff)
2823                                 break;
2824                         udelay(10);
2825                 }
2826
2827                 lcl_adv = 0;
2828                 rmt_adv = 0;
2829
2830                 tp->link_config.active_speed = current_speed;
2831                 tp->link_config.active_duplex = current_duplex;
2832
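                /* Decide whether to report link up.  With autoneg, the link
                 * only counts if the PHY is autonegotiating, we are still
                 * advertising everything we were asked to, and the 1000T
                 * flow-control advertisement checks out; in forced mode the
                 * resolved speed/duplex/flow control must match the request.
                 */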
2833                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2834                         if ((bmcr & BMCR_ANENABLE) &&
2835                             tg3_copper_is_advertising_all(tp,
2836                                                 tp->link_config.advertising)) {
2837                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2838                                                                   &rmt_adv))
2839                                         current_link_up = 1;
2840                         }
2841                 } else {
2842                         if (!(bmcr & BMCR_ANENABLE) &&
2843                             tp->link_config.speed == current_speed &&
2844                             tp->link_config.duplex == current_duplex &&
2845                             tp->link_config.flowctrl ==
2846                             tp->link_config.active_flowctrl) {
2847                                 current_link_up = 1;
2848                         }
2849                 }
2850
2851                 if (current_link_up == 1 &&
2852                     tp->link_config.active_duplex == DUPLEX_FULL)
2853                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2854         }
2855
2856 relink:
2857         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2858                 u32 tmp;
2859
2860                 tg3_phy_copper_begin(tp);
2861
2862                 tg3_readphy(tp, MII_BMSR, &tmp);
2863                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2864                     (tmp & BMSR_LSTATUS))
2865                         current_link_up = 1;
2866         }
2867
2868         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2869         if (current_link_up == 1) {
2870                 if (tp->link_config.active_speed == SPEED_100 ||
2871                     tp->link_config.active_speed == SPEED_10)
2872                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2873                 else
2874                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2875         } else
2876                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2877
2878         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2879         if (tp->link_config.active_duplex == DUPLEX_HALF)
2880                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2881
2882         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2883                 if (current_link_up == 1 &&
2884                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2885                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2886                 else
2887                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2888         }
2889
2890         /* ??? Without this setting Netgear GA302T PHY does not
2891          * ??? send/receive packets...
2892          */
2893         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2894             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2895                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2896                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2897                 udelay(80);
2898         }
2899
2900         tw32_f(MAC_MODE, tp->mac_mode);
2901         udelay(40);
2902
2903         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2904                 /* Polled via timer. */
2905                 tw32_f(MAC_EVENT, 0);
2906         } else {
2907                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2908         }
2909         udelay(40);
2910
2911         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2912             current_link_up == 1 &&
2913             tp->link_config.active_speed == SPEED_1000 &&
2914             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2915              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2916                 udelay(120);
2917                 tw32_f(MAC_STATUS,
2918                      (MAC_STATUS_SYNC_CHANGED |
2919                       MAC_STATUS_CFG_CHANGED));
2920                 udelay(40);
2921                 tg3_write_mem(tp,
2922                               NIC_SRAM_FIRMWARE_MBOX,
2923                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2924         }
2925
2926         if (current_link_up != netif_carrier_ok(tp->dev)) {
2927                 if (current_link_up)
2928                         netif_carrier_on(tp->dev);
2929                 else
2930                         netif_carrier_off(tp->dev);
2931                 tg3_link_report(tp);
2932         }
2933
2934         return 0;
2935 }
2936
2937 struct tg3_fiber_aneginfo {
2938         int state;
2939 #define ANEG_STATE_UNKNOWN              0
2940 #define ANEG_STATE_AN_ENABLE            1
2941 #define ANEG_STATE_RESTART_INIT         2
2942 #define ANEG_STATE_RESTART              3
2943 #define ANEG_STATE_DISABLE_LINK_OK      4
2944 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2945 #define ANEG_STATE_ABILITY_DETECT       6
2946 #define ANEG_STATE_ACK_DETECT_INIT      7
2947 #define ANEG_STATE_ACK_DETECT           8
2948 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2949 #define ANEG_STATE_COMPLETE_ACK         10
2950 #define ANEG_STATE_IDLE_DETECT_INIT     11
2951 #define ANEG_STATE_IDLE_DETECT          12
2952 #define ANEG_STATE_LINK_OK              13
2953 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2954 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2955
2956         u32 flags;
2957 #define MR_AN_ENABLE            0x00000001
2958 #define MR_RESTART_AN           0x00000002
2959 #define MR_AN_COMPLETE          0x00000004
2960 #define MR_PAGE_RX              0x00000008
2961 #define MR_NP_LOADED            0x00000010
2962 #define MR_TOGGLE_TX            0x00000020
2963 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2964 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2965 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2966 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2967 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2968 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2969 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2970 #define MR_TOGGLE_RX            0x00002000
2971 #define MR_NP_RX                0x00004000
2972
2973 #define MR_LINK_OK              0x80000000
2974
2975         unsigned long link_time, cur_time;
2976
2977         u32 ability_match_cfg;
2978         int ability_match_count;
2979
2980         char ability_match, idle_match, ack_match;
2981
2982         u32 txconfig, rxconfig;
2983 #define ANEG_CFG_NP             0x00000080
2984 #define ANEG_CFG_ACK            0x00000040
2985 #define ANEG_CFG_RF2            0x00000020
2986 #define ANEG_CFG_RF1            0x00000010
2987 #define ANEG_CFG_PS2            0x00000001
2988 #define ANEG_CFG_PS1            0x00008000
2989 #define ANEG_CFG_HD             0x00004000
2990 #define ANEG_CFG_FD             0x00002000
2991 #define ANEG_CFG_INVAL          0x00001f06
2992
2993 };
2994 #define ANEG_OK         0
2995 #define ANEG_DONE       1
2996 #define ANEG_TIMER_ENAB 2
2997 #define ANEG_FAILED     -1
2998
2999 #define ANEG_STATE_SETTLE_TIME  10000
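/* One state-machine "tick" corresponds to one pass of the polling loop in
 * fiber_autoneg() below (roughly 1 usec given its udelay(1)), so the settle
 * time above works out to roughly 10 ms, and the 195000-iteration cap bounds
 * the whole negotiation at roughly 200 ms.
 */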
3000
3001 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3002                                    struct tg3_fiber_aneginfo *ap)
3003 {
3004         u16 flowctrl;
3005         unsigned long delta;
3006         u32 rx_cfg_reg;
3007         int ret;
3008
3009         if (ap->state == ANEG_STATE_UNKNOWN) {
3010                 ap->rxconfig = 0;
3011                 ap->link_time = 0;
3012                 ap->cur_time = 0;
3013                 ap->ability_match_cfg = 0;
3014                 ap->ability_match_count = 0;
3015                 ap->ability_match = 0;
3016                 ap->idle_match = 0;
3017                 ap->ack_match = 0;
3018         }
3019         ap->cur_time++;
3020
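        /* Roughly the ability_match / acknowledge_match / idle_match
         * detection of IEEE 802.3 clause 37: a non-zero config word must be
         * seen unchanged on consecutive polls before it is trusted, the ACK
         * bit drives ack_match, and an idle stream (no config words
         * received) drives idle_match.
         */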
3021         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3022                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3023
3024                 if (rx_cfg_reg != ap->ability_match_cfg) {
3025                         ap->ability_match_cfg = rx_cfg_reg;
3026                         ap->ability_match = 0;
3027                         ap->ability_match_count = 0;
3028                 } else {
3029                         if (++ap->ability_match_count > 1) {
3030                                 ap->ability_match = 1;
3031                                 ap->ability_match_cfg = rx_cfg_reg;
3032                         }
3033                 }
3034                 if (rx_cfg_reg & ANEG_CFG_ACK)
3035                         ap->ack_match = 1;
3036                 else
3037                         ap->ack_match = 0;
3038
3039                 ap->idle_match = 0;
3040         } else {
3041                 ap->idle_match = 1;
3042                 ap->ability_match_cfg = 0;
3043                 ap->ability_match_count = 0;
3044                 ap->ability_match = 0;
3045                 ap->ack_match = 0;
3046
3047                 rx_cfg_reg = 0;
3048         }
3049
3050         ap->rxconfig = rx_cfg_reg;
3051         ret = ANEG_OK;
3052
3053         switch(ap->state) {
3054         case ANEG_STATE_UNKNOWN:
3055                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3056                         ap->state = ANEG_STATE_AN_ENABLE;
3057
3058                 /* fallthru */
3059         case ANEG_STATE_AN_ENABLE:
3060                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3061                 if (ap->flags & MR_AN_ENABLE) {
3062                         ap->link_time = 0;
3063                         ap->cur_time = 0;
3064                         ap->ability_match_cfg = 0;
3065                         ap->ability_match_count = 0;
3066                         ap->ability_match = 0;
3067                         ap->idle_match = 0;
3068                         ap->ack_match = 0;
3069
3070                         ap->state = ANEG_STATE_RESTART_INIT;
3071                 } else {
3072                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3073                 }
3074                 break;
3075
3076         case ANEG_STATE_RESTART_INIT:
3077                 ap->link_time = ap->cur_time;
3078                 ap->flags &= ~(MR_NP_LOADED);
3079                 ap->txconfig = 0;
3080                 tw32(MAC_TX_AUTO_NEG, 0);
3081                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3082                 tw32_f(MAC_MODE, tp->mac_mode);
3083                 udelay(40);
3084
3085                 ret = ANEG_TIMER_ENAB;
3086                 ap->state = ANEG_STATE_RESTART;
3087
3088                 /* fallthru */
3089         case ANEG_STATE_RESTART:
3090                 delta = ap->cur_time - ap->link_time;
3091                 if (delta > ANEG_STATE_SETTLE_TIME) {
3092                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3093                 } else {
3094                         ret = ANEG_TIMER_ENAB;
3095                 }
3096                 break;
3097
3098         case ANEG_STATE_DISABLE_LINK_OK:
3099                 ret = ANEG_DONE;
3100                 break;
3101
3102         case ANEG_STATE_ABILITY_DETECT_INIT:
3103                 ap->flags &= ~(MR_TOGGLE_TX);
3104                 ap->txconfig = ANEG_CFG_FD;
3105                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3106                 if (flowctrl & ADVERTISE_1000XPAUSE)
3107                         ap->txconfig |= ANEG_CFG_PS1;
3108                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3109                         ap->txconfig |= ANEG_CFG_PS2;
3110                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3111                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3112                 tw32_f(MAC_MODE, tp->mac_mode);
3113                 udelay(40);
3114
3115                 ap->state = ANEG_STATE_ABILITY_DETECT;
3116                 break;
3117
3118         case ANEG_STATE_ABILITY_DETECT:
3119                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3120                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3121                 }
3122                 break;
3123
3124         case ANEG_STATE_ACK_DETECT_INIT:
3125                 ap->txconfig |= ANEG_CFG_ACK;
3126                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3127                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3128                 tw32_f(MAC_MODE, tp->mac_mode);
3129                 udelay(40);
3130
3131                 ap->state = ANEG_STATE_ACK_DETECT;
3132
3133                 /* fallthru */
3134         case ANEG_STATE_ACK_DETECT:
3135                 if (ap->ack_match != 0) {
3136                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3137                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3138                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3139                         } else {
3140                                 ap->state = ANEG_STATE_AN_ENABLE;
3141                         }
3142                 } else if (ap->ability_match != 0 &&
3143                            ap->rxconfig == 0) {
3144                         ap->state = ANEG_STATE_AN_ENABLE;
3145                 }
3146                 break;
3147
3148         case ANEG_STATE_COMPLETE_ACK_INIT:
3149                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3150                         ret = ANEG_FAILED;
3151                         break;
3152                 }
3153                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3154                                MR_LP_ADV_HALF_DUPLEX |
3155                                MR_LP_ADV_SYM_PAUSE |
3156                                MR_LP_ADV_ASYM_PAUSE |
3157                                MR_LP_ADV_REMOTE_FAULT1 |
3158                                MR_LP_ADV_REMOTE_FAULT2 |
3159                                MR_LP_ADV_NEXT_PAGE |
3160                                MR_TOGGLE_RX |
3161                                MR_NP_RX);
3162                 if (ap->rxconfig & ANEG_CFG_FD)
3163                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3164                 if (ap->rxconfig & ANEG_CFG_HD)
3165                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3166                 if (ap->rxconfig & ANEG_CFG_PS1)
3167                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3168                 if (ap->rxconfig & ANEG_CFG_PS2)
3169                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3170                 if (ap->rxconfig & ANEG_CFG_RF1)
3171                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3172                 if (ap->rxconfig & ANEG_CFG_RF2)
3173                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3174                 if (ap->rxconfig & ANEG_CFG_NP)
3175                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3176
3177                 ap->link_time = ap->cur_time;
3178
3179                 ap->flags ^= (MR_TOGGLE_TX);
3180                 if (ap->rxconfig & 0x0008)
3181                         ap->flags |= MR_TOGGLE_RX;
3182                 if (ap->rxconfig & ANEG_CFG_NP)
3183                         ap->flags |= MR_NP_RX;
3184                 ap->flags |= MR_PAGE_RX;
3185
3186                 ap->state = ANEG_STATE_COMPLETE_ACK;
3187                 ret = ANEG_TIMER_ENAB;
3188                 break;
3189
3190         case ANEG_STATE_COMPLETE_ACK:
3191                 if (ap->ability_match != 0 &&
3192                     ap->rxconfig == 0) {
3193                         ap->state = ANEG_STATE_AN_ENABLE;
3194                         break;
3195                 }
3196                 delta = ap->cur_time - ap->link_time;
3197                 if (delta > ANEG_STATE_SETTLE_TIME) {
3198                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3199                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3200                         } else {
3201                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3202                                     !(ap->flags & MR_NP_RX)) {
3203                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3204                                 } else {
3205                                         ret = ANEG_FAILED;
3206                                 }
3207                         }
3208                 }
3209                 break;
3210
3211         case ANEG_STATE_IDLE_DETECT_INIT:
3212                 ap->link_time = ap->cur_time;
3213                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3214                 tw32_f(MAC_MODE, tp->mac_mode);
3215                 udelay(40);
3216
3217                 ap->state = ANEG_STATE_IDLE_DETECT;
3218                 ret = ANEG_TIMER_ENAB;
3219                 break;
3220
3221         case ANEG_STATE_IDLE_DETECT:
3222                 if (ap->ability_match != 0 &&
3223                     ap->rxconfig == 0) {
3224                         ap->state = ANEG_STATE_AN_ENABLE;
3225                         break;
3226                 }
3227                 delta = ap->cur_time - ap->link_time;
3228                 if (delta > ANEG_STATE_SETTLE_TIME) {
3229                         /* XXX another gem from the Broadcom driver :( */
3230                         ap->state = ANEG_STATE_LINK_OK;
3231                 }
3232                 break;
3233
3234         case ANEG_STATE_LINK_OK:
3235                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3236                 ret = ANEG_DONE;
3237                 break;
3238
3239         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3240                 /* ??? unimplemented */
3241                 break;
3242
3243         case ANEG_STATE_NEXT_PAGE_WAIT:
3244                 /* ??? unimplemented */
3245                 break;
3246
3247         default:
3248                 ret = ANEG_FAILED;
3249                 break;
3250         }
3251
3252         return ret;
3253 }
3254
3255 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3256 {
3257         int res = 0;
3258         struct tg3_fiber_aneginfo aninfo;
3259         int status = ANEG_FAILED;
3260         unsigned int tick;
3261         u32 tmp;
3262
3263         tw32_f(MAC_TX_AUTO_NEG, 0);
3264
3265         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3266         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3267         udelay(40);
3268
3269         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3270         udelay(40);
3271
3272         memset(&aninfo, 0, sizeof(aninfo));
3273         aninfo.flags |= MR_AN_ENABLE;
3274         aninfo.state = ANEG_STATE_UNKNOWN;
3275         aninfo.cur_time = 0;
3276         tick = 0;
3277         while (++tick < 195000) {
3278                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3279                 if (status == ANEG_DONE || status == ANEG_FAILED)
3280                         break;
3281
3282                 udelay(1);
3283         }
3284
3285         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3286         tw32_f(MAC_MODE, tp->mac_mode);
3287         udelay(40);
3288
3289         *txflags = aninfo.txconfig;
3290         *rxflags = aninfo.flags;
3291
3292         if (status == ANEG_DONE &&
3293             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3294                              MR_LP_ADV_FULL_DUPLEX)))
3295                 res = 1;
3296
3297         return res;
3298 }
3299
3300 static void tg3_init_bcm8002(struct tg3 *tp)
3301 {
3302         u32 mac_status = tr32(MAC_STATUS);
3303         int i;
3304
3305         /* Reset when initializing for the first time, or when we have a link. */
3306         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3307             !(mac_status & MAC_STATUS_PCS_SYNCED))
3308                 return;
3309
3310         /* Set PLL lock range. */
3311         tg3_writephy(tp, 0x16, 0x8007);
3312
3313         /* SW reset */
3314         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3315
3316         /* Wait for reset to complete. */
3317         /* XXX schedule_timeout() ... */
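        /* 500 x 10 usec: roughly a 5 ms busy-wait. */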
3318         for (i = 0; i < 500; i++)
3319                 udelay(10);
3320
3321         /* Config mode; select PMA/Ch 1 regs. */
3322         tg3_writephy(tp, 0x10, 0x8411);
3323
3324         /* Enable auto-lock and comdet, select txclk for tx. */
3325         tg3_writephy(tp, 0x11, 0x0a10);
3326
3327         tg3_writephy(tp, 0x18, 0x00a0);
3328         tg3_writephy(tp, 0x16, 0x41ff);
3329
3330         /* Assert and deassert POR. */
3331         tg3_writephy(tp, 0x13, 0x0400);
3332         udelay(40);
3333         tg3_writephy(tp, 0x13, 0x0000);
3334
3335         tg3_writephy(tp, 0x11, 0x0a50);
3336         udelay(40);
3337         tg3_writephy(tp, 0x11, 0x0a10);
3338
3339         /* Wait for signal to stabilize */
3340         /* XXX schedule_timeout() ... */
3341         for (i = 0; i < 15000; i++)
3342                 udelay(10);
3343
3344         /* Deselect the channel register so we can read the PHYID
3345          * later.
3346          */
3347         tg3_writephy(tp, 0x10, 0x8011);
3348 }
3349
3350 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3351 {
3352         u16 flowctrl;
3353         u32 sg_dig_ctrl, sg_dig_status;
3354         u32 serdes_cfg, expected_sg_dig_ctrl;
3355         int workaround, port_a;
3356         int current_link_up;
3357
3358         serdes_cfg = 0;
3359         expected_sg_dig_ctrl = 0;
3360         workaround = 0;
3361         port_a = 1;
3362         current_link_up = 0;
3363
3364         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3365             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3366                 workaround = 1;
3367                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3368                         port_a = 0;
3369
3370                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3371                 /* preserve bits 20-23 for voltage regulator */
3372                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3373         }
3374
3375         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3376
3377         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3378                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3379                         if (workaround) {
3380                                 u32 val = serdes_cfg;
3381
3382                                 if (port_a)
3383                                         val |= 0xc010000;
3384                                 else
3385                                         val |= 0x4010000;
3386                                 tw32_f(MAC_SERDES_CFG, val);
3387                         }
3388
3389                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3390                 }
3391                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3392                         tg3_setup_flow_control(tp, 0, 0);
3393                         current_link_up = 1;
3394                 }
3395                 goto out;
3396         }
3397
3398         /* Want auto-negotiation.  */
3399         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3400
3401         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3402         if (flowctrl & ADVERTISE_1000XPAUSE)
3403                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3404         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3405                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3406
3407         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3408                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3409                     tp->serdes_counter &&
3410                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3411                                     MAC_STATUS_RCVD_CFG)) ==
3412                      MAC_STATUS_PCS_SYNCED)) {
3413                         tp->serdes_counter--;
3414                         current_link_up = 1;
3415                         goto out;
3416                 }
3417 restart_autoneg:
3418                 if (workaround)
3419                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3420                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3421                 udelay(5);
3422                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3423
3424                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3425                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3426         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3427                                  MAC_STATUS_SIGNAL_DET)) {
3428                 sg_dig_status = tr32(SG_DIG_STATUS);
3429                 mac_status = tr32(MAC_STATUS);
3430
3431                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3432                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3433                         u32 local_adv = 0, remote_adv = 0;
3434
3435                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3436                                 local_adv |= ADVERTISE_1000XPAUSE;
3437                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3438                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3439
3440                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3441                                 remote_adv |= LPA_1000XPAUSE;
3442                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3443                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3444
3445                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3446                         current_link_up = 1;
3447                         tp->serdes_counter = 0;
3448                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3449                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3450                         if (tp->serdes_counter)
3451                                 tp->serdes_counter--;
3452                         else {
3453                                 if (workaround) {
3454                                         u32 val = serdes_cfg;
3455
3456                                         if (port_a)
3457                                                 val |= 0xc010000;
3458                                         else
3459                                                 val |= 0x4010000;
3460
3461                                         tw32_f(MAC_SERDES_CFG, val);
3462                                 }
3463
3464                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3465                                 udelay(40);
3466
3467                                 /* Parallel detection: the link is up
3468                                  * only if we have PCS_SYNC and are not
3469                                  * receiving config code words.  */
3470                                 mac_status = tr32(MAC_STATUS);
3471                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3472                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
3473                                         tg3_setup_flow_control(tp, 0, 0);
3474                                         current_link_up = 1;
3475                                         tp->tg3_flags2 |=
3476                                                 TG3_FLG2_PARALLEL_DETECT;
3477                                         tp->serdes_counter =
3478                                                 SERDES_PARALLEL_DET_TIMEOUT;
3479                                 } else
3480                                         goto restart_autoneg;
3481                         }
3482                 }
3483         } else {
3484                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3485                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3486         }
3487
3488 out:
3489         return current_link_up;
3490 }
3491
3492 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3493 {
3494         int current_link_up = 0;
3495
3496         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3497                 goto out;
3498
3499         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3500                 u32 txflags, rxflags;
3501                 int i;
3502
3503                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3504                         u32 local_adv = 0, remote_adv = 0;
3505
3506                         if (txflags & ANEG_CFG_PS1)
3507                                 local_adv |= ADVERTISE_1000XPAUSE;
3508                         if (txflags & ANEG_CFG_PS2)
3509                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3510
3511                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3512                                 remote_adv |= LPA_1000XPAUSE;
3513                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3514                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3515
3516                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3517
3518                         current_link_up = 1;
3519                 }
3520                 for (i = 0; i < 30; i++) {
3521                         udelay(20);
3522                         tw32_f(MAC_STATUS,
3523                                (MAC_STATUS_SYNC_CHANGED |
3524                                 MAC_STATUS_CFG_CHANGED));
3525                         udelay(40);
3526                         if ((tr32(MAC_STATUS) &
3527                              (MAC_STATUS_SYNC_CHANGED |
3528                               MAC_STATUS_CFG_CHANGED)) == 0)
3529                                 break;
3530                 }
3531
3532                 mac_status = tr32(MAC_STATUS);
3533                 if (current_link_up == 0 &&
3534                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3535                     !(mac_status & MAC_STATUS_RCVD_CFG))
3536                         current_link_up = 1;
3537         } else {
3538                 tg3_setup_flow_control(tp, 0, 0);
3539
3540                 /* Forcing 1000FD link up. */
3541                 current_link_up = 1;
3542
3543                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3544                 udelay(40);
3545
3546                 tw32_f(MAC_MODE, tp->mac_mode);
3547                 udelay(40);
3548         }
3549
3550 out:
3551         return current_link_up;
3552 }
3553
3554 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3555 {
3556         u32 orig_pause_cfg;
3557         u16 orig_active_speed;
3558         u8 orig_active_duplex;
3559         u32 mac_status;
3560         int current_link_up;
3561         int i;
3562
3563         orig_pause_cfg = tp->link_config.active_flowctrl;
3564         orig_active_speed = tp->link_config.active_speed;
3565         orig_active_duplex = tp->link_config.active_duplex;
3566
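        /* Fast path: if the link looks unchanged since the last pass (PCS
         * still synced, signal detected, no config change and no new config
         * words received), just acknowledge the status bits and return
         * without renegotiating.
         */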
3567         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3568             netif_carrier_ok(tp->dev) &&
3569             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3570                 mac_status = tr32(MAC_STATUS);
3571                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3572                                MAC_STATUS_SIGNAL_DET |
3573                                MAC_STATUS_CFG_CHANGED |
3574                                MAC_STATUS_RCVD_CFG);
3575                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3576                                    MAC_STATUS_SIGNAL_DET)) {
3577                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3578                                             MAC_STATUS_CFG_CHANGED));
3579                         return 0;
3580                 }
3581         }
3582
3583         tw32_f(MAC_TX_AUTO_NEG, 0);
3584
3585         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3586         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3587         tw32_f(MAC_MODE, tp->mac_mode);
3588         udelay(40);
3589
3590         if (tp->phy_id == PHY_ID_BCM8002)
3591                 tg3_init_bcm8002(tp);
3592
3593         /* Enable link change event even when serdes polling.  */
3594         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3595         udelay(40);
3596
3597         current_link_up = 0;
3598         mac_status = tr32(MAC_STATUS);
3599
3600         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3601                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3602         else
3603                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3604
3605         tp->hw_status->status =
3606                 (SD_STATUS_UPDATED |
3607                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3608
3609         for (i = 0; i < 100; i++) {
3610                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3611                                     MAC_STATUS_CFG_CHANGED));
3612                 udelay(5);
3613                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3614                                          MAC_STATUS_CFG_CHANGED |
3615                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3616                         break;
3617         }
3618
3619         mac_status = tr32(MAC_STATUS);
3620         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3621                 current_link_up = 0;
3622                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3623                     tp->serdes_counter == 0) {
3624                         tw32_f(MAC_MODE, (tp->mac_mode |
3625                                           MAC_MODE_SEND_CONFIGS));
3626                         udelay(1);
3627                         tw32_f(MAC_MODE, tp->mac_mode);
3628                 }
3629         }
3630
3631         if (current_link_up == 1) {
3632                 tp->link_config.active_speed = SPEED_1000;
3633                 tp->link_config.active_duplex = DUPLEX_FULL;
3634                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3635                                     LED_CTRL_LNKLED_OVERRIDE |
3636                                     LED_CTRL_1000MBPS_ON));
3637         } else {
3638                 tp->link_config.active_speed = SPEED_INVALID;
3639                 tp->link_config.active_duplex = DUPLEX_INVALID;
3640                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3641                                     LED_CTRL_LNKLED_OVERRIDE |
3642                                     LED_CTRL_TRAFFIC_OVERRIDE));
3643         }
3644
3645         if (current_link_up != netif_carrier_ok(tp->dev)) {
3646                 if (current_link_up)
3647                         netif_carrier_on(tp->dev);
3648                 else
3649                         netif_carrier_off(tp->dev);
3650                 tg3_link_report(tp);
3651         } else {
3652                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3653                 if (orig_pause_cfg != now_pause_cfg ||
3654                     orig_active_speed != tp->link_config.active_speed ||
3655                     orig_active_duplex != tp->link_config.active_duplex)
3656                         tg3_link_report(tp);
3657         }
3658
3659         return 0;
3660 }
3661
3662 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3663 {
3664         int current_link_up, err = 0;
3665         u32 bmsr, bmcr;
3666         u16 current_speed;
3667         u8 current_duplex;
3668         u32 local_adv, remote_adv;
3669
3670         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3671         tw32_f(MAC_MODE, tp->mac_mode);
3672         udelay(40);
3673
3674         tw32(MAC_EVENT, 0);
3675
3676         tw32_f(MAC_STATUS,
3677              (MAC_STATUS_SYNC_CHANGED |
3678               MAC_STATUS_CFG_CHANGED |
3679               MAC_STATUS_MI_COMPLETION |
3680               MAC_STATUS_LNKSTATE_CHANGED));
3681         udelay(40);
3682
3683         if (force_reset)
3684                 tg3_phy_reset(tp);
3685
3686         current_link_up = 0;
3687         current_speed = SPEED_INVALID;
3688         current_duplex = DUPLEX_INVALID;
3689
3690         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3691         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3692         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3693                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3694                         bmsr |= BMSR_LSTATUS;
3695                 else
3696                         bmsr &= ~BMSR_LSTATUS;
3697         }
3698
3699         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3700
3701         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3702             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3703                 /* do nothing, just check for link up at the end */
3704         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3705                 u32 adv, new_adv;
3706
3707                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3708                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3709                                   ADVERTISE_1000XPAUSE |
3710                                   ADVERTISE_1000XPSE_ASYM |
3711                                   ADVERTISE_SLCT);
3712
3713                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3714
3715                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3716                         new_adv |= ADVERTISE_1000XHALF;
3717                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3718                         new_adv |= ADVERTISE_1000XFULL;
3719
3720                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3721                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3722                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3723                         tg3_writephy(tp, MII_BMCR, bmcr);
3724
3725                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3726                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3727                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3728
3729                         return err;
3730                 }
3731         } else {
3732                 u32 new_bmcr;
3733
3734                 bmcr &= ~BMCR_SPEED1000;
3735                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3736
3737                 if (tp->link_config.duplex == DUPLEX_FULL)
3738                         new_bmcr |= BMCR_FULLDPLX;
3739
3740                 if (new_bmcr != bmcr) {
3741                         /* BMCR_SPEED1000 is a reserved bit that needs
3742                          * to be set on write.
3743                          */
3744                         new_bmcr |= BMCR_SPEED1000;
3745
3746                         /* Force a linkdown */
3747                         if (netif_carrier_ok(tp->dev)) {
3748                                 u32 adv;
3749
3750                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3751                                 adv &= ~(ADVERTISE_1000XFULL |
3752                                          ADVERTISE_1000XHALF |
3753                                          ADVERTISE_SLCT);
3754                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3755                                 tg3_writephy(tp, MII_BMCR, bmcr |
3756                                                            BMCR_ANRESTART |
3757                                                            BMCR_ANENABLE);
3758                                 udelay(10);
3759                                 netif_carrier_off(tp->dev);
3760                         }
3761                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3762                         bmcr = new_bmcr;
3763                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3764                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3765                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3766                             ASIC_REV_5714) {
3767                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3768                                         bmsr |= BMSR_LSTATUS;
3769                                 else
3770                                         bmsr &= ~BMSR_LSTATUS;
3771                         }
3772                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3773                 }
3774         }
3775
3776         if (bmsr & BMSR_LSTATUS) {
3777                 current_speed = SPEED_1000;
3778                 current_link_up = 1;
3779                 if (bmcr & BMCR_FULLDPLX)
3780                         current_duplex = DUPLEX_FULL;
3781                 else
3782                         current_duplex = DUPLEX_HALF;
3783
3784                 local_adv = 0;
3785                 remote_adv = 0;
3786
3787                 if (bmcr & BMCR_ANENABLE) {
3788                         u32 common;
3789
3790                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3791                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3792                         common = local_adv & remote_adv;
3793                         if (common & (ADVERTISE_1000XHALF |
3794                                       ADVERTISE_1000XFULL)) {
3795                                 if (common & ADVERTISE_1000XFULL)
3796                                         current_duplex = DUPLEX_FULL;
3797                                 else
3798                                         current_duplex = DUPLEX_HALF;
3799                         }
3800                         else
3801                                 current_link_up = 0;
3802                 }
3803         }
3804
3805         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3806                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3807
3808         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3809         if (tp->link_config.active_duplex == DUPLEX_HALF)
3810                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3811
3812         tw32_f(MAC_MODE, tp->mac_mode);
3813         udelay(40);
3814
3815         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3816
3817         tp->link_config.active_speed = current_speed;
3818         tp->link_config.active_duplex = current_duplex;
3819
3820         if (current_link_up != netif_carrier_ok(tp->dev)) {
3821                 if (current_link_up)
3822                         netif_carrier_on(tp->dev);
3823                 else {
3824                         netif_carrier_off(tp->dev);
3825                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3826                 }
3827                 tg3_link_report(tp);
3828         }
3829         return err;
3830 }
3831
3832 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3833 {
3834         if (tp->serdes_counter) {
3835                 /* Give autoneg time to complete. */
3836                 tp->serdes_counter--;
3837                 return;
3838         }
3839         if (!netif_carrier_ok(tp->dev) &&
3840             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3841                 u32 bmcr;
3842
3843                 tg3_readphy(tp, MII_BMCR, &bmcr);
3844                 if (bmcr & BMCR_ANENABLE) {
3845                         u32 phy1, phy2;
3846
3847                         /* Select shadow register 0x1f */
3848                         tg3_writephy(tp, 0x1c, 0x7c00);
3849                         tg3_readphy(tp, 0x1c, &phy1);
3850
3851                         /* Select expansion interrupt status register */
3852                         tg3_writephy(tp, 0x17, 0x0f01);
3853                         tg3_readphy(tp, 0x15, &phy2);
3854                         tg3_readphy(tp, 0x15, &phy2);
3855
3856                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3857                                 /* We have signal detect and not receiving
3858                                  * config code words, link is up by parallel
3859                                  * detection.
3860                                  */
3861
3862                                 bmcr &= ~BMCR_ANENABLE;
3863                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3864                                 tg3_writephy(tp, MII_BMCR, bmcr);
3865                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3866                         }
3867                 }
3868         }
3869         else if (netif_carrier_ok(tp->dev) &&
3870                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3871                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3872                 u32 phy2;
3873
3874                 /* Select expansion interrupt status register */
3875                 tg3_writephy(tp, 0x17, 0x0f01);
3876                 tg3_readphy(tp, 0x15, &phy2);
3877                 if (phy2 & 0x20) {
3878                         u32 bmcr;
3879
3880                         /* Config code words received, turn on autoneg. */
3881                         tg3_readphy(tp, MII_BMCR, &bmcr);
3882                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3883
3884                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3885
3886                 }
3887         }
3888 }
3889
3890 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3891 {
3892         int err;
3893
3894         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3895                 err = tg3_setup_fiber_phy(tp, force_reset);
3896         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3897                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3898         } else {
3899                 err = tg3_setup_copper_phy(tp, force_reset);
3900         }
3901
3902         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
3903                 u32 val, scale;
3904
3905                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3906                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3907                         scale = 65;
3908                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3909                         scale = 6;
3910                 else
3911                         scale = 12;
3912
3913                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3914                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3915                 tw32(GRC_MISC_CFG, val);
3916         }
3917
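        /* Half-duplex gigabit presumably needs the extended 802.3 slot time
         * (carrier extension), hence the much larger slot-time value in that
         * one case; the IPG settings are the same either way.
         */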
3918         if (tp->link_config.active_speed == SPEED_1000 &&
3919             tp->link_config.active_duplex == DUPLEX_HALF)
3920                 tw32(MAC_TX_LENGTHS,
3921                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3922                       (6 << TX_LENGTHS_IPG_SHIFT) |
3923                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3924         else
3925                 tw32(MAC_TX_LENGTHS,
3926                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3927                       (6 << TX_LENGTHS_IPG_SHIFT) |
3928                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3929
3930         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3931                 if (netif_carrier_ok(tp->dev)) {
3932                         tw32(HOSTCC_STAT_COAL_TICKS,
3933                              tp->coal.stats_block_coalesce_usecs);
3934                 } else {
3935                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3936                 }
3937         }
3938
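        /* With the ASPM workaround in effect, program the configured
         * pwrmgmt_thresh into the PCIe L1 threshold field while the link is
         * down, and set the field to its maximum once carrier is up.
         */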
3939         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3940                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3941                 if (!netif_carrier_ok(tp->dev))
3942                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3943                               tp->pwrmgmt_thresh;
3944                 else
3945                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3946                 tw32(PCIE_PWR_MGMT_THRESH, val);
3947         }
3948
3949         return err;
3950 }
3951
3952 /* This is called whenever we suspect that the system chipset is re-
3953  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3954  * is bogus tx completions. We try to recover by setting the
3955  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3956  * in the workqueue.
3957  */
3958 static void tg3_tx_recover(struct tg3 *tp)
3959 {
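        /* Sanity check: this recovery path only makes sense when the tx
         * mailbox is being written directly and the write-reorder
         * workaround is not already in effect.
         */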
3960         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3961                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3962
3963         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3964                "mapped I/O cycles to the network device, attempting to "
3965                "recover. Please report the problem to the driver maintainer "
3966                "and include system chipset information.\n", tp->dev->name);
3967
3968         spin_lock(&tp->lock);
3969         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3970         spin_unlock(&tp->lock);
3971 }
3972
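/* Number of free tx descriptors.  E.g. with tx_pending = 200, tx_prod = 100
 * and tx_cons = 90 there are 10 descriptors in flight and 190 still
 * available; the & (TG3_TX_RING_SIZE - 1) mask keeps the subtraction
 * correct when tx_prod has wrapped past tx_cons.
 */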
3973 static inline u32 tg3_tx_avail(struct tg3 *tp)
3974 {
3975         smp_mb();
3976         return (tp->tx_pending -
3977                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3978 }
3979
3980 /* Tigon3 never reports partial packet sends.  So we do not
3981  * need special logic to handle SKBs that have not had all
3982  * of their frags sent yet, like SunGEM does.
3983  */
3984 static void tg3_tx(struct tg3 *tp)
3985 {
3986         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3987         u32 sw_idx = tp->tx_cons;
3988
3989         while (sw_idx != hw_idx) {
3990                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3991                 struct sk_buff *skb = ri->skb;
3992                 int i, tx_bug = 0;
3993
3994                 if (unlikely(skb == NULL)) {
3995                         tg3_tx_recover(tp);
3996                         return;
3997                 }
3998
3999                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4000
4001                 ri->skb = NULL;
4002
4003                 sw_idx = NEXT_TX(sw_idx);
4004
4005                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4006                         ri = &tp->tx_buffers[sw_idx];
4007                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4008                                 tx_bug = 1;
4009                         sw_idx = NEXT_TX(sw_idx);
4010                 }
4011
4012                 dev_kfree_skb(skb);
4013
4014                 if (unlikely(tx_bug)) {
4015                         tg3_tx_recover(tp);
4016                         return;
4017                 }
4018         }
4019
4020         tp->tx_cons = sw_idx;
4021
4022         /* Need to make the tx_cons update visible to tg3_start_xmit()
4023          * before checking for netif_queue_stopped().  Without the
4024          * memory barrier, there is a small possibility that tg3_start_xmit()
4025          * will miss it and cause the queue to be stopped forever.
4026          */
4027         smp_mb();
4028
4029         if (unlikely(netif_queue_stopped(tp->dev) &&
4030                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
4031                 netif_tx_lock(tp->dev);
4032                 if (netif_queue_stopped(tp->dev) &&
4033                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
4034                         netif_wake_queue(tp->dev);
4035                 netif_tx_unlock(tp->dev);
4036         }
4037 }
4038
4039 /* Returns size of skb allocated or < 0 on error.
4040  *
4041  * We only need to fill in the address because the other members
4042  * of the RX descriptor are invariant, see tg3_init_rings.
4043  *
4044  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4045  * posting buffers we only dirty the first cache line of the RX
4046  * descriptor (containing the address).  Whereas for the RX status
4047  * buffers the cpu only reads the last cacheline of the RX descriptor
4048  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4049  */
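/* Concretely, tg3_alloc_rx_skb() below writes only desc->addr_hi and
 * desc->addr_lo -- the address words called out above -- so posting a
 * buffer dirties just that leading cache line of the descriptor.
 */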
4050 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4051                             int src_idx, u32 dest_idx_unmasked)
4052 {
4053         struct tg3_rx_buffer_desc *desc;
4054         struct ring_info *map, *src_map;
4055         struct sk_buff *skb;
4056         dma_addr_t mapping;
4057         int skb_size, dest_idx;
4058
4059         src_map = NULL;
4060         switch (opaque_key) {
4061         case RXD_OPAQUE_RING_STD:
4062                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4063                 desc = &tp->rx_std[dest_idx];
4064                 map = &tp->rx_std_buffers[dest_idx];
4065                 if (src_idx >= 0)
4066                         src_map = &tp->rx_std_buffers[src_idx];
4067                 skb_size = tp->rx_pkt_buf_sz;
4068                 break;
4069
4070         case RXD_OPAQUE_RING_JUMBO:
4071                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4072                 desc = &tp->rx_jumbo[dest_idx];
4073                 map = &tp->rx_jumbo_buffers[dest_idx];
4074                 if (src_idx >= 0)
4075                         src_map = &tp->rx_jumbo_buffers[src_idx];
4076                 skb_size = RX_JUMBO_PKT_BUF_SZ;
4077                 break;
4078
4079         default:
4080                 return -EINVAL;
4081         }
4082
4083         /* Do not overwrite any of the map or rp information
4084          * until we are sure we can commit to a new buffer.
4085          *
4086          * Callers depend upon this behavior and assume that
4087          * we leave everything unchanged if we fail.
4088          */
4089         skb = netdev_alloc_skb(tp->dev, skb_size);
4090         if (skb == NULL)
4091                 return -ENOMEM;
4092
4093         skb_reserve(skb, tp->rx_offset);
4094
4095         mapping = pci_map_single(tp->pdev, skb->data,
4096                                  skb_size - tp->rx_offset,
4097                                  PCI_DMA_FROMDEVICE);
4098
4099         map->skb = skb;
4100         pci_unmap_addr_set(map, mapping, mapping);
4101
4102         if (src_map != NULL)
4103                 src_map->skb = NULL;
4104
4105         desc->addr_hi = ((u64)mapping >> 32);
4106         desc->addr_lo = ((u64)mapping & 0xffffffff);
4107
4108         return skb_size;
4109 }
4110
4111 /* We only need to move over in the address because the other
4112  * members of the RX descriptor are invariant.  See notes above
4113  * tg3_alloc_rx_skb for full details.
4114  */
4115 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4116                            int src_idx, u32 dest_idx_unmasked)
4117 {
4118         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4119         struct ring_info *src_map, *dest_map;
4120         int dest_idx;
4121
4122         switch (opaque_key) {
4123         case RXD_OPAQUE_RING_STD:
4124                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4125                 dest_desc = &tp->rx_std[dest_idx];
4126                 dest_map = &tp->rx_std_buffers[dest_idx];
4127                 src_desc = &tp->rx_std[src_idx];
4128                 src_map = &tp->rx_std_buffers[src_idx];
4129                 break;
4130
4131         case RXD_OPAQUE_RING_JUMBO:
4132                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4133                 dest_desc = &tp->rx_jumbo[dest_idx];
4134                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4135                 src_desc = &tp->rx_jumbo[src_idx];
4136                 src_map = &tp->rx_jumbo_buffers[src_idx];
4137                 break;
4138
4139         default:
4140                 return;
4141         }
4142
4143         dest_map->skb = src_map->skb;
4144         pci_unmap_addr_set(dest_map, mapping,
4145                            pci_unmap_addr(src_map, mapping));
4146         dest_desc->addr_hi = src_desc->addr_hi;
4147         dest_desc->addr_lo = src_desc->addr_lo;
4148
4149         src_map->skb = NULL;
4150 }
4151
4152 #if TG3_VLAN_TAG_USED
4153 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4154 {
4155         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4156 }
4157 #endif
4158
4159 /* The RX ring scheme is composed of multiple rings which post fresh
4160  * buffers to the chip, and one special ring the chip uses to report
4161  * status back to the host.
4162  *
4163  * The special ring reports the status of received packets to the
4164  * host.  The chip does not write into the original descriptor the
4165  * RX buffer was obtained from.  The chip simply takes the original
4166  * descriptor as provided by the host, updates the status and length
4167  * field, then writes this into the next status ring entry.
4168  *
4169  * Each ring the host uses to post buffers to the chip is described
4170  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4171  * it is first placed into the on-chip RAM.  When the packet's length
4172  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
4173  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4174  * whose MAXLEN covers the new packet's length is chosen.
4175  *
4176  * The "separate ring for rx status" scheme may sound queer, but it makes
4177  * sense from a cache coherency perspective.  If only the host writes
4178  * to the buffer post rings, and only the chip writes to the rx status
4179  * rings, then cache lines never move beyond shared-modified state.
4180  * If both the host and chip were to write into the same ring, cache line
4181  * eviction could occur since both entities want it in an exclusive state.
4182  */
4183 static int tg3_rx(struct tg3 *tp, int budget)
4184 {
4185         u32 work_mask, rx_std_posted = 0;
4186         u32 sw_idx = tp->rx_rcb_ptr;
4187         u16 hw_idx;
4188         int received;
4189
4190         hw_idx = tp->hw_status->idx[0].rx_producer;
4191         /*
4192          * We need to order the read of hw_idx and the read of
4193          * the opaque cookie.
4194          */
4195         rmb();
4196         work_mask = 0;
4197         received = 0;
4198         while (sw_idx != hw_idx && budget > 0) {
4199                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4200                 unsigned int len;
4201                 struct sk_buff *skb;
4202                 dma_addr_t dma_addr;
4203                 u32 opaque_key, desc_idx, *post_ptr;
4204
4205                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4206                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4207                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4208                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4209                                                   mapping);
4210                         skb = tp->rx_std_buffers[desc_idx].skb;
4211                         post_ptr = &tp->rx_std_ptr;
4212                         rx_std_posted++;
4213                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4214                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4215                                                   mapping);
4216                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
4217                         post_ptr = &tp->rx_jumbo_ptr;
4218                 }
4219                 } else {
4221                 }
4222
4223                 work_mask |= opaque_key;
4224
4225                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4226                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4227                 drop_it:
4228                         tg3_recycle_rx(tp, opaque_key,
4229                                        desc_idx, *post_ptr);
4230                 drop_it_no_recycle:
4231                         /* Other statistics kept track of by card. */
4232                         tp->net_stats.rx_dropped++;
4233                         goto next_pkt;
4234                 }
4235
4236                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4237                       ETH_FCS_LEN;
4238
4239                 if (len > RX_COPY_THRESHOLD
4240                         && tp->rx_offset == NET_IP_ALIGN
4241                         /* rx_offset will likely not equal NET_IP_ALIGN
4242                          * if this is a 5701 card running in PCI-X mode
4243                          * [see tg3_get_invariants()]
4244                          */
4245                 ) {
4246                         int skb_size;
4247
4248                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4249                                                     desc_idx, *post_ptr);
4250                         if (skb_size < 0)
4251                                 goto drop_it;
4252
4253                         pci_unmap_single(tp->pdev, dma_addr,
4254                                          skb_size - tp->rx_offset,
4255                                          PCI_DMA_FROMDEVICE);
4256
4257                         skb_put(skb, len);
4258                 } else {
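                        /* Small packet (or a chip needing the rx_offset
                         * workaround): copy the data into a fresh skb and
                         * recycle the original DMA buffer back onto the
                         * producer ring untouched, avoiding an unmap/remap.
                         */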
4259                         struct sk_buff *copy_skb;
4260
4261                         tg3_recycle_rx(tp, opaque_key,
4262                                        desc_idx, *post_ptr);
4263
4264                         copy_skb = netdev_alloc_skb(tp->dev,
4265                                                     len + TG3_RAW_IP_ALIGN);
4266                         if (copy_skb == NULL)
4267                                 goto drop_it_no_recycle;
4268
4269                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4270                         skb_put(copy_skb, len);
4271                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4272                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4273                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4274
4275                         /* We'll reuse the original ring buffer. */
4276                         skb = copy_skb;
4277                 }
4278
4279                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4280                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4281                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4282                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4283                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4284                 else
4285                         skb->ip_summed = CHECKSUM_NONE;
4286
4287                 skb->protocol = eth_type_trans(skb, tp->dev);
4288 #if TG3_VLAN_TAG_USED
4289                 if (tp->vlgrp != NULL &&
4290                     desc->type_flags & RXD_FLAG_VLAN) {
4291                         tg3_vlan_rx(tp, skb,
4292                                     desc->err_vlan & RXD_VLAN_MASK);
4293                 } else
4294 #endif
4295                         netif_receive_skb(skb);
4296
4297                 received++;
4298                 budget--;
4299
4300 next_pkt:
4301                 (*post_ptr)++;
4302
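                /* The chip apparently cannot absorb an unbounded burst of
                 * newly posted standard buffers (rx_std_max_post), so once
                 * that many have been replenished, push the producer index
                 * to the mailbox now and restart the count.  The ring's bit
                 * is dropped from work_mask so the tail-end refill below is
                 * skipped unless further standard buffers are posted later
                 * in this loop.
                 */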
4303                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4304                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4305
4306                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4307                                      TG3_64BIT_REG_LOW, idx);
4308                         work_mask &= ~RXD_OPAQUE_RING_STD;
4309                         rx_std_posted = 0;
4310                 }
4311 next_pkt_nopost:
4312                 sw_idx++;
4313                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4314
4315                 /* Refresh hw_idx to see if there is new work */
4316                 if (sw_idx == hw_idx) {
4317                         hw_idx = tp->hw_status->idx[0].rx_producer;
4318                         rmb();
4319                 }
4320         }
4321
4322         /* ACK the status ring. */
4323         tp->rx_rcb_ptr = sw_idx;
4324         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4325
4326         /* Refill RX ring(s). */
4327         if (work_mask & RXD_OPAQUE_RING_STD) {
4328                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4329                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4330                              sw_idx);
4331         }
4332         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4333                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4334                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4335                              sw_idx);
4336         }
4337         mmiowb();
4338
4339         return received;
4340 }
4341
4342 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4343 {
4344         struct tg3_hw_status *sblk = tp->hw_status;
4345
4346         /* handle link change and other phy events */
4347         if (!(tp->tg3_flags &
4348               (TG3_FLAG_USE_LINKCHG_REG |
4349                TG3_FLAG_POLL_SERDES))) {
4350                 if (sblk->status & SD_STATUS_LINK_CHG) {
4351                         sblk->status = SD_STATUS_UPDATED |
4352                                 (sblk->status & ~SD_STATUS_LINK_CHG);
4353                         spin_lock(&tp->lock);
4354                         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4355                                 tw32_f(MAC_STATUS,
4356                                      (MAC_STATUS_SYNC_CHANGED |
4357                                       MAC_STATUS_CFG_CHANGED |
4358                                       MAC_STATUS_MI_COMPLETION |
4359                                       MAC_STATUS_LNKSTATE_CHANGED));
4360                                 udelay(40);
4361                         } else
4362                                 tg3_setup_phy(tp, 0);
4363                         spin_unlock(&tp->lock);
4364                 }
4365         }
4366
4367         /* run TX completion thread */
4368         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4369                 tg3_tx(tp);
4370                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4371                         return work_done;
4372         }
4373
4374         /* run RX thread, within the bounds set by NAPI.
4375          * All RX "locking" is done by ensuring outside
4376          * code synchronizes with tg3->napi.poll()
4377          */
4378         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4379                 work_done += tg3_rx(tp, budget - work_done);
4380
4381         return work_done;
4382 }
4383
4384 static int tg3_poll(struct napi_struct *napi, int budget)
4385 {
4386         struct tg3 *tp = container_of(napi, struct tg3, napi);
4387         int work_done = 0;
4388         struct tg3_hw_status *sblk = tp->hw_status;
4389
4390         while (1) {
4391                 work_done = tg3_poll_work(tp, work_done, budget);
4392
4393                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4394                         goto tx_recovery;
4395
4396                 if (unlikely(work_done >= budget))
4397                         break;
4398
4399                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4400                         /* tp->last_tag is used in tg3_restart_ints() below
4401                          * to tell the hw how much work has been processed,
4402                          * so we must read it before checking for more work.
4403                          */
4404                         tp->last_tag = sblk->status_tag;
4405                         rmb();
4406                 } else
4407                         sblk->status &= ~SD_STATUS_UPDATED;
4408
4409                 if (likely(!tg3_has_work(tp))) {
4410                         netif_rx_complete(tp->dev, napi);
4411                         tg3_restart_ints(tp);
4412                         break;
4413                 }
4414         }
4415
4416         return work_done;
4417
4418 tx_recovery:
4419         /* work_done is guaranteed to be less than budget. */
4420         netif_rx_complete(tp->dev, napi);
4421         schedule_work(&tp->reset_task);
4422         return work_done;
4423 }
4424
4425 static void tg3_irq_quiesce(struct tg3 *tp)
4426 {
4427         BUG_ON(tp->irq_sync);
4428
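        /* Set irq_sync so the interrupt handlers (checked via tg3_irq_sync())
         * stop scheduling NAPI, make the store visible, then wait for any
         * handler instance that is already running to finish.
         */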
4429         tp->irq_sync = 1;
4430         smp_mb();
4431
4432         synchronize_irq(tp->pdev->irq);
4433 }
4434
4435 static inline int tg3_irq_sync(struct tg3 *tp)
4436 {
4437         return tp->irq_sync;
4438 }
4439
4440 /* Fully shut down all tg3 driver activity elsewhere in the system.
4441  * If irq_sync is non-zero, we must also synchronize with the IRQ
4442  * handler.  Most of the time this is not necessary, except when
4443  * shutting down the device.
4444  */
4445 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4446 {
4447         spin_lock_bh(&tp->lock);
4448         if (irq_sync)
4449                 tg3_irq_quiesce(tp);
4450 }
4451
4452 static inline void tg3_full_unlock(struct tg3 *tp)
4453 {
4454         spin_unlock_bh(&tp->lock);
4455 }
4456
4457 /* One-shot MSI handler - Chip automatically disables interrupt
4458  * after sending MSI so driver doesn't have to do it.
4459  */
4460 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4461 {
4462         struct net_device *dev = dev_id;
4463         struct tg3 *tp = netdev_priv(dev);
4464
4465         prefetch(tp->hw_status);
4466         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4467
4468         if (likely(!tg3_irq_sync(tp)))
4469                 netif_rx_schedule(dev, &tp->napi);
4470
4471         return IRQ_HANDLED;
4472 }
4473
4474 /* MSI ISR - No need to check for interrupt sharing and no need to
4475  * flush status block and interrupt mailbox. PCI ordering rules
4476  * guarantee that MSI will arrive after the status block.
4477  */
4478 static irqreturn_t tg3_msi(int irq, void *dev_id)
4479 {
4480         struct net_device *dev = dev_id;
4481         struct tg3 *tp = netdev_priv(dev);
4482
4483         prefetch(tp->hw_status);
4484         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4485         /*
4486          * Writing any value to intr-mbox-0 clears PCI INTA# and
4487          * chip-internal interrupt pending events.
4488          * Writing non-zero to intr-mbox-0 additionally tells the
4489          * NIC to stop sending us irqs, engaging "in-intr-handler"
4490          * event coalescing.
4491          */
4492         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4493         if (likely(!tg3_irq_sync(tp)))
4494                 netif_rx_schedule(dev, &tp->napi);
4495
4496         return IRQ_RETVAL(1);
4497 }
4498
4499 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4500 {
4501         struct net_device *dev = dev_id;
4502         struct tg3 *tp = netdev_priv(dev);
4503         struct tg3_hw_status *sblk = tp->hw_status;
4504         unsigned int handled = 1;
4505
4506         /* In INTx mode, it is possible for the interrupt to arrive at
4507          * the CPU before the status block that was posted prior to the
4508          * interrupt.  Reading the PCI State register will confirm whether
4509          * the interrupt is ours and will flush the status block.
4510          */
4511         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4512                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4513                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4514                         handled = 0;
4515                         goto out;
4516                 }
4517         }
4518
4519         /*
4520          * Writing any value to intr-mbox-0 clears PCI INTA# and
4521          * chip-internal interrupt pending events.
4522          * Writing non-zero to intr-mbox-0 additionally tells the
4523          * NIC to stop sending us irqs, engaging "in-intr-handler"
4524          * event coalescing.
4525          *
4526          * Flush the mailbox to de-assert the IRQ immediately to prevent
4527          * spurious interrupts.  The flush impacts performance but
4528          * excessive spurious interrupts can be worse in some cases.
4529          */
4530         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4531         if (tg3_irq_sync(tp))
4532                 goto out;
4533         sblk->status &= ~SD_STATUS_UPDATED;
4534         if (likely(tg3_has_work(tp))) {
4535                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4536                 netif_rx_schedule(dev, &tp->napi);
4537         } else {
4538                 /* No work, shared interrupt perhaps?  re-enable
4539                  * interrupts, and flush that PCI write
4540                  */
4541                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4542                                0x00000000);
4543         }
4544 out:
4545         return IRQ_RETVAL(handled);
4546 }
4547
4548 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4549 {
4550         struct net_device *dev = dev_id;
4551         struct tg3 *tp = netdev_priv(dev);
4552         struct tg3_hw_status *sblk = tp->hw_status;
4553         unsigned int handled = 1;
4554
4555         /* In INTx mode, it is possible for the interrupt to arrive at
4556          * the CPU before the status block that was posted prior to the
4557          * interrupt.  Reading the PCI State register will confirm whether
4558          * the interrupt is ours and will flush the status block.
4559          */
4560         if (unlikely(sblk->status_tag == tp->last_tag)) {
4561                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4562                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4563                         handled = 0;
4564                         goto out;
4565                 }
4566         }
4567
4568         /*
4569          * writing any value to intr-mbox-0 clears PCI INTA# and
4570          * chip-internal interrupt pending events.
4571          * writing non-zero to intr-mbox-0 additionally tells the
4572          * NIC to stop sending us irqs, engaging "in-intr-handler"
4573          * event coalescing.
4574          *
4575          * Flush the mailbox to de-assert the IRQ immediately to prevent
4576          * spurious interrupts.  The flush impacts performance but
4577          * excessive spurious interrupts can be worse in some cases.
4578          */
4579         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4580         if (tg3_irq_sync(tp))
4581                 goto out;
4582         if (netif_rx_schedule_prep(dev, &tp->napi)) {
4583                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4584                 /* Update last_tag to mark that this status has been
4585                  * seen.  Because the interrupt may be shared, we may be
4586                  * racing with tg3_poll(), so only update last_tag
4587                  * if tg3_poll() is not scheduled.
4588                  */
4589                 tp->last_tag = sblk->status_tag;
4590                 __netif_rx_schedule(dev, &tp->napi);
4591         }
4592 out:
4593         return IRQ_RETVAL(handled);
4594 }
4595
4596 /* ISR for interrupt test */
4597 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4598 {
4599         struct net_device *dev = dev_id;
4600         struct tg3 *tp = netdev_priv(dev);
4601         struct tg3_hw_status *sblk = tp->hw_status;
4602
4603         if ((sblk->status & SD_STATUS_UPDATED) ||
4604             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4605                 tg3_disable_ints(tp);
4606                 return IRQ_RETVAL(1);
4607         }
4608         return IRQ_RETVAL(0);
4609 }
4610
4611 static int tg3_init_hw(struct tg3 *, int);
4612 static int tg3_halt(struct tg3 *, int, int);
4613
4614 /* Restart hardware after configuration changes, self-test, etc.
4615  * Invoked with tp->lock held.
4616  */
4617 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4618         __releases(tp->lock)
4619         __acquires(tp->lock)
4620 {
4621         int err;
4622
4623         err = tg3_init_hw(tp, reset_phy);
4624         if (err) {
4625                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4626                        "aborting.\n", tp->dev->name);
4627                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
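                /* dev_close() can sleep, so tp->lock (taken by our caller
                 * through tg3_full_lock()) has to be dropped around the
                 * teardown below and re-acquired before returning; the
                 * __releases/__acquires annotations above document this.
                 */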
4628                 tg3_full_unlock(tp);
4629                 del_timer_sync(&tp->timer);
4630                 tp->irq_sync = 0;
4631                 napi_enable(&tp->napi);
4632                 dev_close(tp->dev);
4633                 tg3_full_lock(tp, 0);
4634         }
4635         return err;
4636 }
4637
4638 #ifdef CONFIG_NET_POLL_CONTROLLER
4639 static void tg3_poll_controller(struct net_device *dev)
4640 {
4641         struct tg3 *tp = netdev_priv(dev);
4642
4643         tg3_interrupt(tp->pdev->irq, dev);
4644 }
4645 #endif
4646
4647 static void tg3_reset_task(struct work_struct *work)
4648 {
4649         struct tg3 *tp = container_of(work, struct tg3, reset_task);
4650         int err;
4651         unsigned int restart_timer;
4652
4653         tg3_full_lock(tp, 0);
4654
4655         if (!netif_running(tp->dev)) {
4656                 tg3_full_unlock(tp);
4657                 return;
4658         }
4659
4660         tg3_full_unlock(tp);
4661
4662         tg3_phy_stop(tp);
4663
4664         tg3_netif_stop(tp);
4665
4666         tg3_full_lock(tp, 1);
4667
4668         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4669         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4670
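        /* A pending TX recovery suggests that posted mailbox writes were
         * reordered on their way to the chip; switch to the flushed mailbox
         * accessors and note the reordering quirk before re-initializing.
         * (This is inferred from the accessor swap below, not from chip
         * documentation.)
         */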
4671         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4672                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4673                 tp->write32_rx_mbox = tg3_write_flush_reg32;
4674                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4675                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4676         }
4677
4678         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4679         err = tg3_init_hw(tp, 1);
4680         if (err)
4681                 goto out;
4682
4683         tg3_netif_start(tp);
4684
4685         if (restart_timer)
4686                 mod_timer(&tp->timer, jiffies + 1);
4687
4688 out:
4689         tg3_full_unlock(tp);
4690
4691         if (!err)
4692                 tg3_phy_start(tp);
4693 }
4694
4695 static void tg3_dump_short_state(struct tg3 *tp)
4696 {
4697         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4698                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4699         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4700                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4701 }
4702
4703 static void tg3_tx_timeout(struct net_device *dev)
4704 {
4705         struct tg3 *tp = netdev_priv(dev);
4706
4707         if (netif_msg_tx_err(tp)) {
4708                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4709                        dev->name);
4710                 tg3_dump_short_state(tp);
4711         }
4712
4713         schedule_work(&tp->reset_task);
4714 }
4715
4716 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
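/* The check below flags any mapping whose low 32 bits would wrap once len
 * (plus a small pad) is added, i.e. a buffer that straddles a 4 GB boundary.
 * The 0xffffdcc0 cutoff is presumably 4 GB minus the largest buffer the
 * driver ever maps (about 9 KB for a jumbo frame), so the common case is
 * settled with a single compare.
 */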
4717 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4718 {
4719         u32 base = (u32) mapping & 0xffffffff;
4720
4721         return ((base > 0xffffdcc0) &&
4722                 (base + len + 8 < base));
4723 }
4724
4725 /* Test for DMA addresses > 40-bit */
4726 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4727                                           int len)
4728 {
4729 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4730         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4731                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4732         return 0;
4733 #else
4734         return 0;
4735 #endif
4736 }
4737
4738 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4739
4740 /* Work around 4GB and 40-bit hardware DMA bugs. */
4741 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4742                                        u32 last_plus_one, u32 *start,
4743                                        u32 base_flags, u32 mss)
4744 {
4745         struct sk_buff *new_skb;
4746         dma_addr_t new_addr = 0;
4747         u32 entry = *start;
4748         int i, ret = 0;
4749
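        /* Strategy: linearize the offending skb into a freshly allocated
         * copy, map that single buffer and emit one descriptor for it.  The
         * 5701 additionally gets extra headroom, apparently to keep the data
         * 4-byte aligned.  The original skb and its DMA mappings are always
         * released at the end, whether or not the copy succeeds.
         */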
4750         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4751                 new_skb = skb_copy(skb, GFP_ATOMIC);
4752         else {
4753                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4754
4755                 new_skb = skb_copy_expand(skb,
4756                                           skb_headroom(skb) + more_headroom,
4757                                           skb_tailroom(skb), GFP_ATOMIC);
4758         }
4759
4760         if (!new_skb) {
4761                 ret = -1;
4762         } else {
4763                 /* New SKB is guaranteed to be linear. */
4764                 entry = *start;
4765                 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4766                 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4767
4768                 /* Make sure new skb does not cross any 4G boundaries.
4769                  * Drop the packet if it does.
4770                  */
4771                 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4772                         if (!ret)
4773                                 skb_dma_unmap(&tp->pdev->dev, new_skb,
4774                                               DMA_TO_DEVICE);
4775                         ret = -1;
4776                         dev_kfree_skb(new_skb);
4777                         new_skb = NULL;
4778                 } else {
4779                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
4780                                     base_flags, 1 | (mss << 1));
4781                         *start = NEXT_TX(entry);
4782                 }
4783         }
4784
4785         /* Now clean up the sw ring entries. */
4786         i = 0;
4787         while (entry != last_plus_one) {
4788                 if (i == 0) {
4789                         tp->tx_buffers[entry].skb = new_skb;
4790                 } else {
4791                         tp->tx_buffers[entry].skb = NULL;
4792                 }
4793                 entry = NEXT_TX(entry);
4794                 i++;
4795         }
4796
4797         skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4798         dev_kfree_skb(skb);
4799
4800         return ret;
4801 }
4802
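/* Fill in one TX descriptor.  mss_and_is_end packs the "last descriptor of
 * this packet" flag into bit 0 and the MSS into the remaining bits; the MSS
 * is then shifted into place by TXD_MSS_SHIFT next to any VLAN tag, which
 * arrives in the upper 16 bits of flags when TXD_FLAG_VLAN is set.
 */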
4803 static void tg3_set_txd(struct tg3 *tp, int entry,
4804                         dma_addr_t mapping, int len, u32 flags,
4805                         u32 mss_and_is_end)
4806 {
4807         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4808         int is_end = (mss_and_is_end & 0x1);
4809         u32 mss = (mss_and_is_end >> 1);
4810         u32 vlan_tag = 0;
4811
4812         if (is_end)
4813                 flags |= TXD_FLAG_END;
4814         if (flags & TXD_FLAG_VLAN) {
4815                 vlan_tag = flags >> 16;
4816                 flags &= 0xffff;
4817         }
4818         vlan_tag |= (mss << TXD_MSS_SHIFT);
4819
4820         txd->addr_hi = ((u64) mapping >> 32);
4821         txd->addr_lo = ((u64) mapping & 0xffffffff);
4822         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4823         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4824 }
4825
4826 /* hard_start_xmit for devices that don't have any bugs and
4827  * support TG3_FLG2_HW_TSO_2 only.
4828  */
4829 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4830 {
4831         struct tg3 *tp = netdev_priv(dev);
4832         u32 len, entry, base_flags, mss;
4833         struct skb_shared_info *sp;
4834         dma_addr_t mapping;
4835
4836         len = skb_headlen(skb);
4837
4838         /* We are running in BH disabled context with netif_tx_lock
4839          * and TX reclaim runs via tp->napi.poll inside of a software
4840          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4841          * no IRQ context deadlocks to worry about either.  Rejoice!
4842          */
4843         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4844                 if (!netif_queue_stopped(dev)) {
4845                         netif_stop_queue(dev);
4846
4847                         /* This is a hard error, log it. */
4848                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4849                                "queue awake!\n", dev->name);
4850                 }
4851                 return NETDEV_TX_BUSY;
4852         }
4853
4854         entry = tp->tx_prod;
4855         base_flags = 0;
4856         mss = 0;
4857         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4858                 int tcp_opt_len, ip_tcp_len;
4859
4860                 if (skb_header_cloned(skb) &&
4861                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4862                         dev_kfree_skb(skb);
4863                         goto out_unlock;
4864                 }
4865
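                /* For hardware TSO the L3+L4 header length is folded into
                 * the upper bits of the MSS value (<< 9) handed to
                 * tg3_set_txd() below: skb_headlen() minus the Ethernet
                 * header for IPv6, the IP header plus TCP header (with
                 * options) for IPv4.
                 */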
4866                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4867                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4868                 else {
4869                         struct iphdr *iph = ip_hdr(skb);
4870
4871                         tcp_opt_len = tcp_optlen(skb);
4872                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4873
4874                         iph->check = 0;
4875                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4876                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4877                 }
4878
4879                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4880                                TXD_FLAG_CPU_POST_DMA);
4881
4882                 tcp_hdr(skb)->check = 0;
4883
4884         } else if (skb->ip_summed == CHECKSUM_PARTIAL)
4886                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4887 #if TG3_VLAN_TAG_USED
4888         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4889                 base_flags |= (TXD_FLAG_VLAN |
4890                                (vlan_tx_tag_get(skb) << 16));
4891 #endif
4892
4893         if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4894                 dev_kfree_skb(skb);
4895                 goto out_unlock;
4896         }
4897
4898         sp = skb_shinfo(skb);
4899
4900         mapping = sp->dma_maps[0];
4901
4902         tp->tx_buffers[entry].skb = skb;
4903
4904         tg3_set_txd(tp, entry, mapping, len, base_flags,
4905                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4906
4907         entry = NEXT_TX(entry);
4908
4909         /* Now loop through additional data fragments, and queue them. */
4910         if (skb_shinfo(skb)->nr_frags > 0) {
4911                 unsigned int i, last;
4912
4913                 last = skb_shinfo(skb)->nr_frags - 1;
4914                 for (i = 0; i <= last; i++) {
4915                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4916
4917                         len = frag->size;
4918                         mapping = sp->dma_maps[i + 1];
4919                         tp->tx_buffers[entry].skb = NULL;
4920
4921                         tg3_set_txd(tp, entry, mapping, len,
4922                                     base_flags, (i == last) | (mss << 1));
4923
4924                         entry = NEXT_TX(entry);
4925                 }
4926         }
4927
4928         /* Packets are ready, update the Tx producer idx locally and on the card. */
4929         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4930
4931         tp->tx_prod = entry;
4932         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4933                 netif_stop_queue(dev);
4934                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4935                         netif_wake_queue(tp->dev);
4936         }
4937
4938 out_unlock:
4939         mmiowb();
4940
4941         dev->trans_start = jiffies;
4942
4943         return NETDEV_TX_OK;
4944 }
4945
4946 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4947
4948 /* Use GSO to work around a rare TSO bug that may be triggered when the
4949  * TSO header is greater than 80 bytes.
4950  */
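/* The fallback lets the GSO layer segment the skb in software
 * (skb_gso_segment() with TSO masked out of the feature flags) and then
 * queues each resulting MTU-sized skb through the slow-path transmit
 * routine one at a time.
 */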
4951 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4952 {
4953         struct sk_buff *segs, *nskb;
4954
4955         /* Estimate the number of fragments in the worst case */
4956         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4957                 netif_stop_queue(tp->dev);
4958                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4959                         return NETDEV_TX_BUSY;
4960
4961                 netif_wake_queue(tp->dev);
4962         }
4963
4964         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4965         if (IS_ERR(segs))
4966                 goto tg3_tso_bug_end;
4967
4968         do {
4969                 nskb = segs;
4970                 segs = segs->next;
4971                 nskb->next = NULL;
4972                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4973         } while (segs);
4974
4975 tg3_tso_bug_end:
4976         dev_kfree_skb(skb);
4977
4978         return NETDEV_TX_OK;
4979 }
4980
4981 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4982  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4983  */
4984 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4985 {
4986         struct tg3 *tp = netdev_priv(dev);
4987         u32 len, entry, base_flags, mss;
4988         struct skb_shared_info *sp;
4989         int would_hit_hwbug;
4990         dma_addr_t mapping;
4991
4992         len = skb_headlen(skb);
4993
4994         /* We are running in BH disabled context with netif_tx_lock
4995          * and TX reclaim runs via tp->napi.poll inside of a software
4996          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4997          * no IRQ context deadlocks to worry about either.  Rejoice!
4998          */
4999         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
5000                 if (!netif_queue_stopped(dev)) {
5001                         netif_stop_queue(dev);
5002
5003                         /* This is a hard error, log it. */
5004                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5005                                "queue awake!\n", dev->name);
5006                 }
5007                 return NETDEV_TX_BUSY;
5008         }
5009
5010         entry = tp->tx_prod;
5011         base_flags = 0;
5012         if (skb->ip_summed == CHECKSUM_PARTIAL)
5013                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5014         mss = 0;
5015         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5016                 struct iphdr *iph;
5017                 int tcp_opt_len, ip_tcp_len, hdr_len;
5018
5019                 if (skb_header_cloned(skb) &&
5020                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5021                         dev_kfree_skb(skb);
5022                         goto out_unlock;
5023                 }
5024
5025                 tcp_opt_len = tcp_optlen(skb);
5026                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5027
5028                 hdr_len = ip_tcp_len + tcp_opt_len;
5029                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5030                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5031                         return (tg3_tso_bug(tp, skb));
5032
5033                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5034                                TXD_FLAG_CPU_POST_DMA);
5035
5036                 iph = ip_hdr(skb);
5037                 iph->check = 0;
5038                 iph->tot_len = htons(mss + hdr_len);
5039                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5040                         tcp_hdr(skb)->check = 0;
5041                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5042                 } else
5043                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5044                                                                  iph->daddr, 0,
5045                                                                  IPPROTO_TCP,
5046                                                                  0);
5047
5048                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5049                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
5050                         if (tcp_opt_len || iph->ihl > 5) {
5051                                 int tsflags;
5052
5053                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5054                                 mss |= (tsflags << 11);
5055                         }
5056                 } else {
5057                         if (tcp_opt_len || iph->ihl > 5) {
5058                                 int tsflags;
5059
5060                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5061                                 base_flags |= tsflags << 12;
5062                         }
5063                 }
5064         }
5065 #if TG3_VLAN_TAG_USED
5066         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5067                 base_flags |= (TXD_FLAG_VLAN |
5068                                (vlan_tx_tag_get(skb) << 16));
5069 #endif
5070
5071         if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5072                 dev_kfree_skb(skb);
5073                 goto out_unlock;
5074         }
5075
5076         sp = skb_shinfo(skb);
5077
5078         mapping = sp->dma_maps[0];
5079
5080         tp->tx_buffers[entry].skb = skb;
5081
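        /* would_hit_hwbug is set whenever any descriptor of this packet
         * would trip one of the DMA errata checks (the 5701 DMA bug, a
         * buffer crossing a 4 GB boundary, or an address above 40 bits);
         * such packets are rewritten by tigon3_dma_hwbug_workaround() below.
         */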
5082         would_hit_hwbug = 0;
5083
5084         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5085                 would_hit_hwbug = 1;
5086         else if (tg3_4g_overflow_test(mapping, len))
5087                 would_hit_hwbug = 1;
5088
5089         tg3_set_txd(tp, entry, mapping, len, base_flags,
5090                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5091
5092         entry = NEXT_TX(entry);
5093
5094         /* Now loop through additional data fragments, and queue them. */
5095         if (skb_shinfo(skb)->nr_frags > 0) {
5096                 unsigned int i, last;
5097
5098                 last = skb_shinfo(skb)->nr_frags - 1;
5099                 for (i = 0; i <= last; i++) {
5100                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5101
5102                         len = frag->size;
5103                         mapping = sp->dma_maps[i + 1];
5104
5105                         tp->tx_buffers[entry].skb = NULL;
5106
5107                         if (tg3_4g_overflow_test(mapping, len))
5108                                 would_hit_hwbug = 1;
5109
5110                         if (tg3_40bit_overflow_test(tp, mapping, len))
5111                                 would_hit_hwbug = 1;
5112
5113                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5114                                 tg3_set_txd(tp, entry, mapping, len,
5115                                             base_flags, (i == last)|(mss << 1));
5116                         else
5117                                 tg3_set_txd(tp, entry, mapping, len,
5118                                             base_flags, (i == last));
5119
5120                         entry = NEXT_TX(entry);
5121                 }
5122         }
5123
5124         if (would_hit_hwbug) {
5125                 u32 last_plus_one = entry;
5126                 u32 start;
5127
5128                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5129                 start &= (TG3_TX_RING_SIZE - 1);
5130
5131                 /* If the workaround fails due to memory/mapping
5132                  * failure, silently drop this packet.
5133                  */
5134                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5135                                                 &start, base_flags, mss))
5136                         goto out_unlock;
5137
5138                 entry = start;
5139         }
5140
5141         /* Packets are ready, update the Tx producer idx locally and on the card. */
5142         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5143
5144         tp->tx_prod = entry;
5145         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5146                 netif_stop_queue(dev);
5147                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5148                         netif_wake_queue(tp->dev);
5149         }
5150
5151 out_unlock:
5152         mmiowb();
5153
5154         dev->trans_start = jiffies;
5155
5156         return NETDEV_TX_OK;
5157 }
5158
5159 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5160                                int new_mtu)
5161 {
5162         dev->mtu = new_mtu;
5163
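        /* On the 5780 class, jumbo MTUs and TSO are mutually exclusive:
         * going above the standard Ethernet MTU turns TSO off, and jumbo
         * frames are served from the standard ring with larger buffers
         * (see tg3_init_rings()) rather than the separate jumbo ring used
         * by other jumbo-capable chips.
         */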
5164         if (new_mtu > ETH_DATA_LEN) {
5165                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5166                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5167                         ethtool_op_set_tso(dev, 0);
5168                 } else
5170                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5171         } else {
5172                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5173                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5174                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5175         }
5176 }
5177
5178 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5179 {
5180         struct tg3 *tp = netdev_priv(dev);
5181         int err;
5182
5183         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5184                 return -EINVAL;
5185
5186         if (!netif_running(dev)) {
5187                 /* We'll just catch it later when the
5188                  * device is brought up.
5189                  */
5190                 tg3_set_mtu(dev, tp, new_mtu);
5191                 return 0;
5192         }
5193
5194         tg3_phy_stop(tp);
5195
5196         tg3_netif_stop(tp);
5197
5198         tg3_full_lock(tp, 1);
5199
5200         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5201
5202         tg3_set_mtu(dev, tp, new_mtu);
5203
5204         err = tg3_restart_hw(tp, 0);
5205
5206         if (!err)
5207                 tg3_netif_start(tp);
5208
5209         tg3_full_unlock(tp);
5210
5211         if (!err)
5212                 tg3_phy_start(tp);
5213
5214         return err;
5215 }
5216
5217 /* Free up pending packets in all rx/tx rings.
5218  *
5219  * The chip has been shut down and the driver detached from
5220  * the networking core, so no interrupts or new tx packets will
5221  * end up in the driver.  tp->{tx,}lock is not held and we are not
5222  * in an interrupt context and thus may sleep.
5223  */
5224 static void tg3_free_rings(struct tg3 *tp)
5225 {
5226         struct ring_info *rxp;
5227         int i;
5228
5229         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5230                 rxp = &tp->rx_std_buffers[i];
5231
5232                 if (rxp->skb == NULL)
5233                         continue;
5234                 pci_unmap_single(tp->pdev,
5235                                  pci_unmap_addr(rxp, mapping),
5236                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5237                                  PCI_DMA_FROMDEVICE);
5238                 dev_kfree_skb_any(rxp->skb);
5239                 rxp->skb = NULL;
5240         }
5241
5242         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5243                 rxp = &tp->rx_jumbo_buffers[i];
5244
5245                 if (rxp->skb == NULL)
5246                         continue;
5247                 pci_unmap_single(tp->pdev,
5248                                  pci_unmap_addr(rxp, mapping),
5249                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5250                                  PCI_DMA_FROMDEVICE);
5251                 dev_kfree_skb_any(rxp->skb);
5252                 rxp->skb = NULL;
5253         }
5254
5255         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5256                 struct tx_ring_info *txp;
5257                 struct sk_buff *skb;
5258
5259                 txp = &tp->tx_buffers[i];
5260                 skb = txp->skb;
5261
5262                 if (skb == NULL) {
5263                         i++;
5264                         continue;
5265                 }
5266
5267                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5268
5269                 txp->skb = NULL;
5270
5271                 i += skb_shinfo(skb)->nr_frags + 1;
5272
5273                 dev_kfree_skb_any(skb);
5274         }
5275 }
5276
5277 /* Initialize tx/rx rings for packet processing.
5278  *
5279  * The chip has been shut down and the driver detached from
5280  * the networking core, so no interrupts or new tx packets will
5281  * end up in the driver.  tp->{tx,}lock are held and thus
5282  * we may not sleep.
5283  */
5284 static int tg3_init_rings(struct tg3 *tp)
5285 {
5286         u32 i;
5287
5288         /* Free up all the SKBs. */
5289         tg3_free_rings(tp);
5290
5291         /* Zero out all descriptors. */
5292         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5293         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5294         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5295         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5296
5297         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5298         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5299             (tp->dev->mtu > ETH_DATA_LEN))
5300                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5301
5302         /* Initialize the invariants of the rings; we only set this
5303          * stuff once.  This works because the card does not
5304          * write into the rx buffer posting rings.
5305          */
5306         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5307                 struct tg3_rx_buffer_desc *rxd;
5308
5309                 rxd = &tp->rx_std[i];
5310                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5311                         << RXD_LEN_SHIFT;
5312                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5313                 rxd->opaque = (RXD_OPAQUE_RING_STD |
5314                                (i << RXD_OPAQUE_INDEX_SHIFT));
5315         }
5316
5317         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5318                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5319                         struct tg3_rx_buffer_desc *rxd;
5320
5321                         rxd = &tp->rx_jumbo[i];
5322                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5323                                 << RXD_LEN_SHIFT;
5324                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5325                                 RXD_FLAG_JUMBO;
5326                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5327                                (i << RXD_OPAQUE_INDEX_SHIFT));
5328                 }
5329         }
5330
5331         /* Now allocate fresh SKBs for each rx ring. */
5332         for (i = 0; i < tp->rx_pending; i++) {
5333                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5334                         printk(KERN_WARNING PFX
5335                                "%s: Using a smaller RX standard ring, "
5336                                "only %d out of %d buffers were allocated "
5337                                "successfully.\n",
5338                                tp->dev->name, i, tp->rx_pending);
5339                         if (i == 0)
5340                                 return -ENOMEM;
5341                         tp->rx_pending = i;
5342                         break;
5343                 }
5344         }
5345
5346         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5347                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5348                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5349                                              -1, i) < 0) {
5350                                 printk(KERN_WARNING PFX
5351                                        "%s: Using a smaller RX jumbo ring, "
5352                                        "only %d out of %d buffers were "
5353                                        "allocated successfully.\n",
5354                                        tp->dev->name, i, tp->rx_jumbo_pending);
5355                                 if (i == 0) {
5356                                         tg3_free_rings(tp);
5357                                         return -ENOMEM;
5358                                 }
5359                                 tp->rx_jumbo_pending = i;
5360                                 break;
5361                         }
5362                 }
5363         }
5364         return 0;
5365 }
5366
5367 /*
5368  * Must not be invoked with interrupt sources disabled and
5369  * the hardware shut down.
5370  */
5371 static void tg3_free_consistent(struct tg3 *tp)
5372 {
5373         kfree(tp->rx_std_buffers);
5374         tp->rx_std_buffers = NULL;
5375         if (tp->rx_std) {
5376                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5377                                     tp->rx_std, tp->rx_std_mapping);
5378                 tp->rx_std = NULL;
5379         }
5380         if (tp->rx_jumbo) {
5381                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5382                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
5383                 tp->rx_jumbo = NULL;
5384         }
5385         if (tp->rx_rcb) {
5386                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5387                                     tp->rx_rcb, tp->rx_rcb_mapping);
5388                 tp->rx_rcb = NULL;
5389         }
5390         if (tp->tx_ring) {
5391                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5392                         tp->tx_ring, tp->tx_desc_mapping);
5393                 tp->tx_ring = NULL;
5394         }
5395         if (tp->hw_status) {
5396                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5397                                     tp->hw_status, tp->status_mapping);
5398                 tp->hw_status = NULL;
5399         }
5400         if (tp->hw_stats) {
5401                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5402                                     tp->hw_stats, tp->stats_mapping);
5403                 tp->hw_stats = NULL;
5404         }
5405 }
5406
5407 /*
5408  * Must not be invoked with interrupt sources disabled and
5409  * the hardware shut down.  Can sleep.
5410  */
5411 static int tg3_alloc_consistent(struct tg3 *tp)
5412 {
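        /* One allocation backs all three shadow arrays: the standard RX,
         * jumbo RX and TX ring bookkeeping entries.  The jumbo and TX
         * pointers are carved out of it below, so only rx_std_buffers is
         * ever passed to kfree() (see tg3_free_consistent()).
         */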
5413         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5414                                       (TG3_RX_RING_SIZE +
5415                                        TG3_RX_JUMBO_RING_SIZE)) +
5416                                      (sizeof(struct tx_ring_info) *
5417                                       TG3_TX_RING_SIZE),
5418                                      GFP_KERNEL);
5419         if (!tp->rx_std_buffers)
5420                 return -ENOMEM;
5421
5422         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5423         tp->tx_buffers = (struct tx_ring_info *)
5424                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5425
5426         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5427                                           &tp->rx_std_mapping);
5428         if (!tp->rx_std)
5429                 goto err_out;
5430
5431         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5432                                             &tp->rx_jumbo_mapping);
5433
5434         if (!tp->rx_jumbo)
5435                 goto err_out;
5436
5437         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5438                                           &tp->rx_rcb_mapping);
5439         if (!tp->rx_rcb)
5440                 goto err_out;
5441
5442         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5443                                            &tp->tx_desc_mapping);
5444         if (!tp->tx_ring)
5445                 goto err_out;
5446
5447         tp->hw_status = pci_alloc_consistent(tp->pdev,
5448                                              TG3_HW_STATUS_SIZE,
5449                                              &tp->status_mapping);
5450         if (!tp->hw_status)
5451                 goto err_out;
5452
5453         tp->hw_stats = pci_alloc_consistent(tp->pdev,
5454                                             sizeof(struct tg3_hw_stats),
5455                                             &tp->stats_mapping);
5456         if (!tp->hw_stats)
5457                 goto err_out;
5458
5459         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5460         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5461
5462         return 0;
5463
5464 err_out:
5465         tg3_free_consistent(tp);
5466         return -ENOMEM;
5467 }
5468
5469 #define MAX_WAIT_CNT 1000
5470
5471 /* To stop a block, clear the enable bit and poll till it
5472  * clears.  tp->lock is held.
5473  */
5474 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5475 {
5476         unsigned int i;
5477         u32 val;
5478
5479         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5480                 switch (ofs) {
5481                 case RCVLSC_MODE:
5482                 case DMAC_MODE:
5483                 case MBFREE_MODE:
5484                 case BUFMGR_MODE:
5485                 case MEMARB_MODE:
5486                         /* We can't enable/disable these bits of the
5487                          * 5705/5750, just say success.
5488                          */
5489                         return 0;
5490
5491                 default:
5492                         break;
5493                 }
5494         }
5495
5496         val = tr32(ofs);
5497         val &= ~enable_bit;
5498         tw32_f(ofs, val);
5499
5500         for (i = 0; i < MAX_WAIT_CNT; i++) {
5501                 udelay(100);
5502                 val = tr32(ofs);
5503                 if ((val & enable_bit) == 0)
5504                         break;
5505         }
5506
5507         if (i == MAX_WAIT_CNT && !silent) {
5508                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5509                        "ofs=%lx enable_bit=%x\n",
5510                        ofs, enable_bit);
5511                 return -ENODEV;
5512         }
5513
5514         return 0;
5515 }
5516
5517 /* tp->lock is held. */
5518 static int tg3_abort_hw(struct tg3 *tp, int silent)
5519 {
5520         int i, err;
5521
5522         tg3_disable_ints(tp);
5523
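        /* Shut the datapath down receive side first (MAC RX mode, then the
         * RX blocks), then the send and DMA blocks, and only after the MAC
         * transmitter has drained stop host coalescing, the buffer manager
         * and the memory arbiter.
         */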
5524         tp->rx_mode &= ~RX_MODE_ENABLE;
5525         tw32_f(MAC_RX_MODE, tp->rx_mode);
5526         udelay(10);
5527
5528         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5529         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5530         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5531         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5532         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5533         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5534
5535         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5536         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5537         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5538         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5539         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5540         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5541         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5542
5543         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5544         tw32_f(MAC_MODE, tp->mac_mode);
5545         udelay(40);
5546
5547         tp->tx_mode &= ~TX_MODE_ENABLE;
5548         tw32_f(MAC_TX_MODE, tp->tx_mode);
5549
5550         for (i = 0; i < MAX_WAIT_CNT; i++) {
5551                 udelay(100);
5552                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5553                         break;
5554         }
5555         if (i >= MAX_WAIT_CNT) {
5556                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5557                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5558                        tp->dev->name, tr32(MAC_TX_MODE));
5559                 err |= -ENODEV;
5560         }
5561
5562         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5563         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5564         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5565
5566         tw32(FTQ_RESET, 0xffffffff);
5567         tw32(FTQ_RESET, 0x00000000);
5568
5569         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5570         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5571
5572         if (tp->hw_status)
5573                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5574         if (tp->hw_stats)
5575                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5576
5577         return err;
5578 }
5579
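/* Acquire the NVRAM software arbitration semaphore: request SWARB_REQ_SET1
 * and poll for SWARB_GNT1 for up to ~160ms.  Acquisitions nest via
 * tp->nvram_lock_cnt; chips without NVRAM skip the handshake entirely.
 */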
5580 /* tp->lock is held. */
5581 static int tg3_nvram_lock(struct tg3 *tp)
5582 {
5583         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5584                 int i;
5585
5586                 if (tp->nvram_lock_cnt == 0) {
5587                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5588                         for (i = 0; i < 8000; i++) {
5589                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5590                                         break;
5591                                 udelay(20);
5592                         }
5593                         if (i == 8000) {
5594                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5595                                 return -ENODEV;
5596                         }
5597                 }
5598                 tp->nvram_lock_cnt++;
5599         }
5600         return 0;
5601 }
5602
5603 /* tp->lock is held. */
5604 static void tg3_nvram_unlock(struct tg3 *tp)
5605 {
5606         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5607                 if (tp->nvram_lock_cnt > 0)
5608                         tp->nvram_lock_cnt--;
5609                 if (tp->nvram_lock_cnt == 0)
5610                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5611         }
5612 }
5613
5614 /* tp->lock is held. */
5615 static void tg3_enable_nvram_access(struct tg3 *tp)
5616 {
5617         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5618             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5619                 u32 nvaccess = tr32(NVRAM_ACCESS);
5620
5621                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5622         }
5623 }
5624
5625 /* tp->lock is held. */
5626 static void tg3_disable_nvram_access(struct tg3 *tp)
5627 {
5628         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5629             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5630                 u32 nvaccess = tr32(NVRAM_ACCESS);
5631
5632                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5633         }
5634 }
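
/* Sketch of the typical NVRAM access pattern used elsewhere in this driver
 * (illustrative only, not a quote of any particular call site), with
 * tp->lock held around the whole sequence:
 *
 *      if (tg3_nvram_lock(tp) == 0) {
 *              tg3_enable_nvram_access(tp);
 *              ... issue NVRAM reads or writes ...
 *              tg3_disable_nvram_access(tp);
 *              tg3_nvram_unlock(tp);
 *      }
 */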
5635
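/* Post an event to the APE management firmware: bail out unless the APE
 * segment signature and APE_FW_STATUS_READY check out, wait up to ~1ms
 * (under the APE memory lock) for a previously posted event to be consumed,
 * write the new event with EVENT_PENDING set, then ring the APE doorbell.
 */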
5636 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5637 {
5638         int i;
5639         u32 apedata;
5640
5641         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5642         if (apedata != APE_SEG_SIG_MAGIC)
5643                 return;
5644
5645         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5646         if (!(apedata & APE_FW_STATUS_READY))
5647                 return;
5648
5649         /* Wait for up to 1 millisecond for APE to service previous event. */
5650         for (i = 0; i < 10; i++) {
5651                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5652                         return;
5653
5654                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5655
5656                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5657                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5658                                         event | APE_EVENT_STATUS_EVENT_PENDING);
5659
5660                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5661
5662                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5663                         break;
5664
5665                 udelay(100);
5666         }
5667
5668         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5669                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5670 }
5671
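/* Notify the APE firmware of driver state transitions.  For RESET_KIND_INIT
 * the host segment signature, length, init count, driver ID and behavior
 * flags are written first; for RESET_KIND_SHUTDOWN the host segment
 * signature is wiped so the APE assumes the OS is absent.  No-op unless
 * APE support is enabled.
 */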
5672 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5673 {
5674         u32 event;
5675         u32 apedata;
5676
5677         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5678                 return;
5679
5680         switch (kind) {
5681                 case RESET_KIND_INIT:
5682                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5683                                         APE_HOST_SEG_SIG_MAGIC);
5684                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5685                                         APE_HOST_SEG_LEN_MAGIC);
5686                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5687                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5688                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5689                                         APE_HOST_DRIVER_ID_MAGIC);
5690                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5691                                         APE_HOST_BEHAV_NO_PHYLOCK);
5692
5693                         event = APE_EVENT_STATUS_STATE_START;
5694                         break;
5695                 case RESET_KIND_SHUTDOWN:
5696                         /* With the interface we are currently using,
5697                          * APE does not track driver state.  Wiping
5698                          * out the HOST SEGMENT SIGNATURE forces
5699                          * the APE to assume OS absent status.
5700                          */
5701                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5702
5703                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5704                         break;
5705                 case RESET_KIND_SUSPEND:
5706                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5707                         break;
5708                 default:
5709                         return;
5710         }
5711
5712         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5713
5714         tg3_ape_send_event(tp, event);
5715 }
5716
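/* Pre-reset handshake with the management firmware: write the firmware
 * mailbox magic value and, when the new-style ASF handshake is in use,
 * record the driver state about to be entered.  INIT and SUSPEND are also
 * forwarded to the APE.
 */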
5717 /* tp->lock is held. */
5718 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5719 {
5720         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5721                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5722
5723         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5724                 switch (kind) {
5725                 case RESET_KIND_INIT:
5726                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5727                                       DRV_STATE_START);
5728                         break;
5729
5730                 case RESET_KIND_SHUTDOWN:
5731                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5732                                       DRV_STATE_UNLOAD);
5733                         break;
5734
5735                 case RESET_KIND_SUSPEND:
5736                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5737                                       DRV_STATE_SUSPEND);
5738                         break;
5739
5740                 default:
5741                         break;
5742                 }
5743         }
5744
5745         if (kind == RESET_KIND_INIT ||
5746             kind == RESET_KIND_SUSPEND)
5747                 tg3_ape_driver_state_change(tp, kind);
5748 }
5749
5750 /* tp->lock is held. */
5751 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5752 {
5753         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5754                 switch (kind) {
5755                 case RESET_KIND_INIT:
5756                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5757                                       DRV_STATE_START_DONE);
5758                         break;
5759
5760                 case RESET_KIND_SHUTDOWN:
5761                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5762                                       DRV_STATE_UNLOAD_DONE);
5763                         break;
5764
5765                 default:
5766                         break;
5767                 }
5768         }
5769
5770         if (kind == RESET_KIND_SHUTDOWN)
5771                 tg3_ape_driver_state_change(tp, kind);
5772 }
5773
5774 /* tp->lock is held. */
5775 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5776 {
5777         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5778                 switch (kind) {
5779                 case RESET_KIND_INIT:
5780                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5781                                       DRV_STATE_START);
5782                         break;
5783
5784                 case RESET_KIND_SHUTDOWN:
5785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5786                                       DRV_STATE_UNLOAD);
5787                         break;
5788
5789                 case RESET_KIND_SUSPEND:
5790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5791                                       DRV_STATE_SUSPEND);
5792                         break;
5793
5794                 default:
5795                         break;
5796                 }
5797         }
5798 }
5799
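/* Wait for the bootcode to finish after a reset.  5906 parts signal
 * completion through VCPU_STATUS; all others write back the one's
 * complement of the mailbox magic (polled here for up to ~1 second).
 * A missing firmware image is tolerated but reported once.
 */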
5800 static int tg3_poll_fw(struct tg3 *tp)
5801 {
5802         int i;
5803         u32 val;
5804
5805         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5806                 /* Wait up to 20ms for init done. */
5807                 for (i = 0; i < 200; i++) {
5808                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5809                                 return 0;
5810                         udelay(100);
5811                 }
5812                 return -ENODEV;
5813         }
5814
5815         /* Wait for firmware initialization to complete. */
5816         for (i = 0; i < 100000; i++) {
5817                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5818                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5819                         break;
5820                 udelay(10);
5821         }
5822
5823         /* Chip might not be fitted with firmware.  Some Sun onboard
5824          * parts are configured like that.  So don't signal the timeout
5825          * of the above loop as an error, but do report the lack of
5826          * running firmware once.
5827          */
5828         if (i >= 100000 &&
5829             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5830                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5831
5832                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5833                        tp->dev->name);
5834         }
5835
5836         return 0;
5837 }
5838
5839 /* Save PCI command register before chip reset */
5840 static void tg3_save_pci_state(struct tg3 *tp)
5841 {
5842         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5843 }
5844
5845 /* Restore PCI state after chip reset */
5846 static void tg3_restore_pci_state(struct tg3 *tp)
5847 {
5848         u32 val;
5849
5850         /* Re-enable indirect register accesses. */
5851         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5852                                tp->misc_host_ctrl);
5853
5854         /* Set MAX PCI retry to zero. */
5855         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5856         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5857             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5858                 val |= PCISTATE_RETRY_SAME_DMA;
5859         /* Allow reads and writes to the APE register and memory space. */
5860         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5861                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5862                        PCISTATE_ALLOW_APE_SHMEM_WR;
5863         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5864
5865         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5866
5867         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5868                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5869                         pcie_set_readrq(tp->pdev, 4096);
5870                 else {
5871                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5872                                               tp->pci_cacheline_sz);
5873                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5874                                               tp->pci_lat_timer);
5875                 }
5876         }
5877
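        /* tp->pcix_cap is only meaningful for devices operating in PCI-X
         * mode, hence the TG3_FLAG_PCIX_MODE check before touching
         * PCI_X_CMD below.
         */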
5878         /* Make sure PCI-X relaxed ordering bit is clear. */
5879         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
5880                 u16 pcix_cmd;
5881
5882                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5883                                      &pcix_cmd);
5884                 pcix_cmd &= ~PCI_X_CMD_ERO;
5885                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5886                                       pcix_cmd);
5887         }
5888
5889         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5890
5891                 /* Chip reset on 5780 will reset MSI enable bit,
5892                  * so we need to restore it.
5893                  */
5894                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5895                         u16 ctrl;
5896
5897                         pci_read_config_word(tp->pdev,
5898                                              tp->msi_cap + PCI_MSI_FLAGS,
5899                                              &ctrl);
5900                         pci_write_config_word(tp->pdev,
5901                                               tp->msi_cap + PCI_MSI_FLAGS,
5902                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5903                         val = tr32(MSGINT_MODE);
5904                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5905                 }
5906         }
5907 }
5908
5909 static void tg3_stop_fw(struct tg3 *);
5910
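/* Perform a full GRC core-clock reset: save the PCI state that the reset
 * will clobber, flag TG3_FLAG_CHIP_RESETTING so a shared irq handler will
 * not touch the hardware, issue GRC_MISC_CFG_CORECLK_RESET, then restore
 * the PCI state, re-enable the memory arbiter and MAC, wait for bootcode
 * via tg3_poll_fw() and re-probe the ASF configuration.
 */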
5911 /* tp->lock is held. */
5912 static int tg3_chip_reset(struct tg3 *tp)
5913 {
5914         u32 val;
5915         void (*write_op)(struct tg3 *, u32, u32);
5916         int err;
5917
5918         tg3_nvram_lock(tp);
5919
5920         tg3_mdio_stop(tp);
5921
5922         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5923
5924         /* No matching tg3_nvram_unlock() after this because
5925          * chip reset below will undo the nvram lock.
5926          */
5927         tp->nvram_lock_cnt = 0;
5928
5929         /* GRC_MISC_CFG core clock reset will clear the memory
5930          * enable bit in PCI register 4 and the MSI enable bit
5931          * on some chips, so we save relevant registers here.
5932          */
5933         tg3_save_pci_state(tp);
5934
5935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5936             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5937             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5938             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5939             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5940             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5941                 tw32(GRC_FASTBOOT_PC, 0);
5942
5943         /*
5944          * We must avoid the readl() that normally takes place.
5945          * It can lock up machines, cause machine checks, and do other
5946          * fun things.  So, temporarily disable the 5701
5947          * hardware workaround while we do the reset.
5948          */
5949         write_op = tp->write32;
5950         if (write_op == tg3_write_flush_reg32)
5951                 tp->write32 = tg3_write32;
5952
5953         /* Prevent the irq handler from reading or writing PCI registers
5954          * during chip reset when the memory enable bit in the PCI command
5955          * register may be cleared.  The chip does not generate interrupts
5956          * at this time, but the irq handler may still be called due to irq
5957          * sharing or irqpoll.
5958          */
5959         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5960         if (tp->hw_status) {
5961                 tp->hw_status->status = 0;
5962                 tp->hw_status->status_tag = 0;
5963         }
5964         tp->last_tag = 0;
5965         smp_mb();
5966         synchronize_irq(tp->pdev->irq);
5967
5968         /* do the reset */
5969         val = GRC_MISC_CFG_CORECLK_RESET;
5970
5971         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5972                 if (tr32(0x7e2c) == 0x60) {
5973                         tw32(0x7e2c, 0x20);
5974                 }
5975                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5976                         tw32(GRC_MISC_CFG, (1 << 29));
5977                         val |= (1 << 29);
5978                 }
5979         }
5980
5981         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5982                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5983                 tw32(GRC_VCPU_EXT_CTRL,
5984                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5985         }
5986
5987         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5988                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5989         tw32(GRC_MISC_CFG, val);
5990
5991         /* restore 5701 hardware bug workaround write method */
5992         tp->write32 = write_op;
5993
5994         /* Unfortunately, we have to delay before the PCI read back.
5995          * Some 575X chips will not even respond to a PCI cfg access
5996          * when the reset command is given to the chip.
5997          *
5998          * How do these hardware designers expect things to work
5999          * properly if the PCI write is posted for a long period
6000          * of time?  It is always necessary to have some method by
6001          * which a register read back can occur to push out the
6002          * write that does the reset.
6003          *
6004          * For most tg3 variants the trick below has been working.
6005          * Ho hum...
6006          */
6007         udelay(120);
6008
6009         /* Flush PCI posted writes.  The normal MMIO registers
6010          * are inaccessible at this time so this is the only
6011          * are inaccessible at this time, so this is the only
6012          * way to do this reliably (actually, this is no longer
6013          * register read/write but this upset some 5701 variants.
6014          */
6015         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6016
6017         udelay(120);
6018
6019         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6020                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6021                         int i;
6022                         u32 cfg_val;
6023
6024                         /* Wait for link training to complete.  */
6025                         for (i = 0; i < 5000; i++)
6026                                 udelay(100);
6027
6028                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6029                         pci_write_config_dword(tp->pdev, 0xc4,
6030                                                cfg_val | (1 << 15));
6031                 }
6032                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
6033                         /* Set PCIE max payload size and clear error status.  */
6034                         pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
6035         }
6036
6037         tg3_restore_pci_state(tp);
6038
6039         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6040
6041         val = 0;
6042         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6043                 val = tr32(MEMARB_MODE);
6044         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6045
6046         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6047                 tg3_stop_fw(tp);
6048                 tw32(0x5000, 0x400);
6049         }
6050
6051         tw32(GRC_MODE, tp->grc_mode);
6052
6053         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6054                 val = tr32(0xc4);
6055
6056                 tw32(0xc4, val | (1 << 15));
6057         }
6058
6059         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6060             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6061                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6062                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6063                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6064                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6065         }
6066
6067         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6068                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6069                 tw32_f(MAC_MODE, tp->mac_mode);
6070         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6071                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6072                 tw32_f(MAC_MODE, tp->mac_mode);
6073         } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6074                 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6075                 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6076                         tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6077                 tw32_f(MAC_MODE, tp->mac_mode);
6078         } else
6079                 tw32_f(MAC_MODE, 0);
6080         udelay(40);
6081
6082         tg3_mdio_start(tp);
6083
6084         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6085
6086         err = tg3_poll_fw(tp);
6087         if (err)
6088                 return err;
6089
6090         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6091             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6092                 val = tr32(0x7c00);
6093
6094                 tw32(0x7c00, val | (1 << 25));
6095         }
6096
6097         /* Reprobe ASF enable state.  */
6098         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6099         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6100         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6101         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6102                 u32 nic_cfg;
6103
6104                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6105                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6106                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6107                         tp->last_event_jiffies = jiffies;
6108                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6109                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6110                 }
6111         }
6112
6113         return 0;
6114 }
6115
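/* Pause the ASF firmware before a reset: send FWCMD_NICDRV_PAUSE_FW through
 * the firmware command mailbox, waiting for the RX CPU to acknowledge the
 * previous event beforehand and this one afterwards.  Skipped when the APE
 * handles management traffic or ASF is disabled.
 */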
6116 /* tp->lock is held. */
6117 static void tg3_stop_fw(struct tg3 *tp)
6118 {
6119         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6120            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6121                 /* Wait for RX cpu to ACK the previous event. */
6122                 tg3_wait_for_event_ack(tp);
6123
6124                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6125
6126                 tg3_generate_fw_event(tp);
6127
6128                 /* Wait for RX cpu to ACK this event. */
6129                 tg3_wait_for_event_ack(tp);
6130         }
6131 }
6132
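/* Bring the chip to a clean, halted state: stop the management firmware,
 * write the pre-reset signatures, quiesce the hardware, perform the chip
 * reset and finally write the post-reset signatures.
 */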
6133 /* tp->lock is held. */
6134 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6135 {
6136         int err;
6137
6138         tg3_stop_fw(tp);
6139
6140         tg3_write_sig_pre_reset(tp, kind);
6141
6142         tg3_abort_hw(tp, silent);
6143         err = tg3_chip_reset(tp);
6144
6145         tg3_write_sig_legacy(tp, kind);
6146         tg3_write_sig_post_reset(tp, kind);
6147
6148         if (err)
6149                 return err;
6150
6151         return 0;
6152 }
6153
6154 #define TG3_FW_RELEASE_MAJOR    0x0
6155 #define TG3_FW_RELEASE_MINOR    0x0
6156 #define TG3_FW_RELEASE_FIX      0x0
6157 #define TG3_FW_START_ADDR       0x08000000
6158 #define TG3_FW_TEXT_ADDR        0x08000000
6159 #define TG3_FW_TEXT_LEN         0x9c0
6160 #define TG3_FW_RODATA_ADDR      0x080009c0
6161 #define TG3_FW_RODATA_LEN       0x60
6162 #define TG3_FW_DATA_ADDR        0x08000a40
6163 #define TG3_FW_DATA_LEN         0x20
6164 #define TG3_FW_SBSS_ADDR        0x08000a60
6165 #define TG3_FW_SBSS_LEN         0xc
6166 #define TG3_FW_BSS_ADDR         0x08000a70
6167 #define TG3_FW_BSS_LEN          0x10
6168
6169 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
6170         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6171         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6172         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6173         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6174         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6175         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6176         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6177         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6178         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6179         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6180         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6181         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6182         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6183         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6184         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6185         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6186         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6187         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6188         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6189         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6190         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6191         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6192         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6193         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6194         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6195         0, 0, 0, 0, 0, 0,
6196         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6197         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6198         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6199         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6200         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6201         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6202         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6203         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6204         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6205         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6206         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6207         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6208         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6209         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6210         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6211         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6212         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6213         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6214         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6215         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6216         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6217         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6218         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6219         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6220         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6221         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6222         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6223         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6224         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6225         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6226         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6227         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6228         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6229         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6230         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6231         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6232         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6233         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6234         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6235         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6236         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6237         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6238         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6239         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6240         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6241         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6242         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6243         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6244         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6245         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6246         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6247         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6248         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6249         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6250         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6251         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6252         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6253         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6254         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6255         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6256         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6257         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6258         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6259         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6260         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6261 };
6262
6263 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
6264         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6265         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6266         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6267         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6268         0x00000000
6269 };
6270
6271 #if 0 /* All zeros, don't eat up space with it. */
6272 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6273         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6274         0x00000000, 0x00000000, 0x00000000, 0x00000000
6275 };
6276 #endif
6277
6278 #define RX_CPU_SCRATCH_BASE     0x30000
6279 #define RX_CPU_SCRATCH_SIZE     0x04000
6280 #define TX_CPU_SCRATCH_BASE     0x34000
6281 #define TX_CPU_SCRATCH_SIZE     0x04000
6282
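/* Halt the RX or TX on-chip MIPS CPU by setting CPU_MODE_HALT and polling
 * for it to stick; the 5906 instead halts its VCPU through
 * GRC_VCPU_EXT_CTRL.  5705 and later parts have no TX CPU, hence the
 * BUG_ON.  The firmware's NVRAM arbitration request is cleared on success.
 */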
6283 /* tp->lock is held. */
6284 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6285 {
6286         int i;
6287
6288         BUG_ON(offset == TX_CPU_BASE &&
6289                (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6290
6291         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6292                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6293
6294                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6295                 return 0;
6296         }
6297         if (offset == RX_CPU_BASE) {
6298                 for (i = 0; i < 10000; i++) {
6299                         tw32(offset + CPU_STATE, 0xffffffff);
6300                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6301                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6302                                 break;
6303                 }
6304
6305                 tw32(offset + CPU_STATE, 0xffffffff);
6306                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6307                 udelay(10);
6308         } else {
6309                 for (i = 0; i < 10000; i++) {
6310                         tw32(offset + CPU_STATE, 0xffffffff);
6311                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6312                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6313                                 break;
6314                 }
6315         }
6316
6317         if (i >= 10000) {
6318                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
6319                        "%s CPU\n",
6320                        tp->dev->name,
6321                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6322                 return -ENODEV;
6323         }
6324
6325         /* Clear firmware's nvram arbitration. */
6326         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6327                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6328         return 0;
6329 }
6330
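/* Describes one firmware image to be copied into CPU scratch memory: the
 * base address and length of the text, rodata and data segments.  A NULL
 * segment pointer means "fill with zeros".
 */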
6331 struct fw_info {
6332         unsigned int text_base;
6333         unsigned int text_len;
6334         const u32 *text_data;
6335         unsigned int rodata_base;
6336         unsigned int rodata_len;
6337         const u32 *rodata_data;
6338         unsigned int data_base;
6339         unsigned int data_len;
6340         const u32 *data_data;
6341 };
6342
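/* Copy a firmware image into a CPU's scratch memory: halt the CPU (under
 * the NVRAM lock, since bootcode may still be running), zero the scratch
 * area, then write the text, rodata and data segments one word at a time,
 * leaving the CPU halted for the caller to start.
 */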
6343 /* tp->lock is held. */
6344 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6345                                  int cpu_scratch_size, struct fw_info *info)
6346 {
6347         int err, lock_err, i;
6348         void (*write_op)(struct tg3 *, u32, u32);
6349
6350         if (cpu_base == TX_CPU_BASE &&
6351             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6352                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6353                        "TX cpu firmware on %s, which is a 5705-class chip.\n",
6354                        tp->dev->name);
6355                 return -EINVAL;
6356         }
6357
6358         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6359                 write_op = tg3_write_mem;
6360         else
6361                 write_op = tg3_write_indirect_reg32;
6362
6363         /* It is possible that bootcode is still loading at this point.
6364          * Get the nvram lock before halting the cpu.
6365          */
6366         lock_err = tg3_nvram_lock(tp);
6367         err = tg3_halt_cpu(tp, cpu_base);
6368         if (!lock_err)
6369                 tg3_nvram_unlock(tp);
6370         if (err)
6371                 goto out;
6372
6373         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6374                 write_op(tp, cpu_scratch_base + i, 0);
6375         tw32(cpu_base + CPU_STATE, 0xffffffff);
6376         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
6377         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6378                 write_op(tp, (cpu_scratch_base +
6379                               (info->text_base & 0xffff) +
6380                               (i * sizeof(u32))),
6381                          (info->text_data ?
6382                           info->text_data[i] : 0));
6383         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6384                 write_op(tp, (cpu_scratch_base +
6385                               (info->rodata_base & 0xffff) +
6386                               (i * sizeof(u32))),
6387                          (info->rodata_data ?
6388                           info->rodata_data[i] : 0));
6389         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6390                 write_op(tp, (cpu_scratch_base +
6391                               (info->data_base & 0xffff) +
6392                               (i * sizeof(u32))),
6393                          (info->data_data ?
6394                           info->data_data[i] : 0));
6395
6396         err = 0;
6397
6398 out:
6399         return err;
6400 }
6401
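/* Work around the 5701 A0 by loading the small fixup firmware above into
 * both CPU scratch areas, then starting only the RX CPU and verifying that
 * its program counter actually lands on TG3_FW_TEXT_ADDR.
 */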
6402 /* tp->lock is held. */
6403 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6404 {
6405         struct fw_info info;
6406         int err, i;
6407
6408         info.text_base = TG3_FW_TEXT_ADDR;
6409         info.text_len = TG3_FW_TEXT_LEN;
6410         info.text_data = &tg3FwText[0];
6411         info.rodata_base = TG3_FW_RODATA_ADDR;
6412         info.rodata_len = TG3_FW_RODATA_LEN;
6413         info.rodata_data = &tg3FwRodata[0];
6414         info.data_base = TG3_FW_DATA_ADDR;
6415         info.data_len = TG3_FW_DATA_LEN;
6416         info.data_data = NULL;
6417
6418         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6419                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6420                                     &info);
6421         if (err)
6422                 return err;
6423
6424         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6425                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6426                                     &info);
6427         if (err)
6428                 return err;
6429
6430         /* Now start up only the RX cpu. */
6431         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6432         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6433
6434         for (i = 0; i < 5; i++) {
6435                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6436                         break;
6437                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6438                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6439                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6440                 udelay(1000);
6441         }
6442         if (i >= 5) {
6443                 printk(KERN_ERR PFX "tg3_load_firmware failed to set RX "
6444                        "CPU PC for %s, is %08x, should be %08x\n",
6445                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6446                        TG3_FW_TEXT_ADDR);
6447                 return -ENODEV;
6448         }
6449         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6450         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6451
6452         return 0;
6453 }
6454
6455
6456 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
6457 #define TG3_TSO_FW_RELEASE_MINOR        0x6
6458 #define TG3_TSO_FW_RELEASE_FIX          0x0
6459 #define TG3_TSO_FW_START_ADDR           0x08000000
6460 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
6461 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
6462 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
6463 #define TG3_TSO_FW_RODATA_LEN           0x60
6464 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
6465 #define TG3_TSO_FW_DATA_LEN             0x30
6466 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
6467 #define TG3_TSO_FW_SBSS_LEN             0x2c
6468 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
6469 #define TG3_TSO_FW_BSS_LEN              0x894
6470
6471 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6472         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6473         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6474         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6475         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6476         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6477         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6478         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6479         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6480         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6481         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6482         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6483         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6484         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6485         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6486         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6487         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6488         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6489         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6490         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6491         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6492         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6493         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6494         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6495         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6496         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6497         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6498         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6499         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6500         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6501         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6502         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6503         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6504         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6505         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6506         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6507         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6508         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6509         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6510         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6511         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6512         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6513         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6514         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6515         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6516         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6517         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6518         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6519         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6520         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6521         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6522         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6523         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6524         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6525         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6526         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6527         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6528         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6529         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6530         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6531         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6532         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6533         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6534         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6535         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6536         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6537         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6538         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6539         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6540         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6541         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6542         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6543         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6544         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6545         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6546         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6547         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6548         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6549         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6550         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6551         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6552         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6553         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6554         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6555         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6556         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6557         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6558         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6559         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6560         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6561         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6562         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6563         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6564         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6565         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6566         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6567         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6568         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6569         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6570         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6571         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6572         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6573         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6574         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6575         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6576         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6577         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6578         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6579         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6580         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6581         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6582         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6583         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6584         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6585         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6586         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6587         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6588         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6589         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6590         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6591         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6592         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6593         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6594         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6595         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6596         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6597         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6598         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6599         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6600         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6601         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6602         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6603         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6604         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6605         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6606         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6607         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6608         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6609         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6610         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6611         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6612         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6613         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6614         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6615         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6616         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6617         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6618         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6619         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6620         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6621         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6622         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6623         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6624         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6625         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6626         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6627         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6628         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6629         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6630         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6631         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6632         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6633         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6634         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6635         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6636         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6637         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6638         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6639         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6640         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6641         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6642         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6643         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6644         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6645         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6646         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6647         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6648         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6649         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6650         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6651         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6652         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6653         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6654         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6655         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6656         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6657         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6658         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6659         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6660         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6661         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6662         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6663         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6664         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6665         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6666         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6667         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6668         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6669         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6670         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6671         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6672         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6673         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6674         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6675         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6676         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6677         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6678         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6679         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6680         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6681         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6682         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6683         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6684         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6685         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6686         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6687         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6688         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6689         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6690         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6691         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6692         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6693         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6694         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6695         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6696         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6697         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6698         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6699         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6700         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6701         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6702         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6703         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6704         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6705         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6706         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6707         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6708         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6709         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6710         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6711         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6712         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6713         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6714         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6715         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6716         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6717         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6718         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6719         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6720         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6721         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6722         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6723         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6724         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6725         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6726         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6727         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6728         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6729         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6730         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6731         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6732         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6733         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6734         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6735         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6736         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6737         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6738         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6739         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6740         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6741         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6742         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6743         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6744         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6745         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6746         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6747         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6748         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6749         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6750         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6751         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6752         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6753         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6754         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6755         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6756 };
6757
6758 static const u32 tg3TsoFwRodata[] = {
6759         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6760         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6761         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6762         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6763         0x00000000,
6764 };
6765
6766 static const u32 tg3TsoFwData[] = {
6767         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6768         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6769         0x00000000,
6770 };
6771
6772 /* 5705 needs a special version of the TSO firmware.  */
6773 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
6774 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
6775 #define TG3_TSO5_FW_RELEASE_FIX         0x0
6776 #define TG3_TSO5_FW_START_ADDR          0x00010000
6777 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
6778 #define TG3_TSO5_FW_TEXT_LEN            0xe90
6779 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
6780 #define TG3_TSO5_FW_RODATA_LEN          0x50
6781 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
6782 #define TG3_TSO5_FW_DATA_LEN            0x20
6783 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
6784 #define TG3_TSO5_FW_SBSS_LEN            0x28
6785 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
6786 #define TG3_TSO5_FW_BSS_LEN             0x88
6787
6788 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6789         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6790         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6791         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6792         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6793         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6794         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6795         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6796         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6797         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6798         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6799         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6800         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6801         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6802         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6803         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6804         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6805         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6806         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6807         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6808         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6809         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6810         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6811         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6812         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6813         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6814         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6815         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6816         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6817         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6818         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6819         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6820         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6821         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6822         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6823         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6824         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6825         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6826         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6827         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6828         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6829         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6830         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6831         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6832         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6833         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6834         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6835         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6836         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6837         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6838         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6839         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6840         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6841         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6842         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6843         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6844         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6845         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6846         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6847         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6848         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6849         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6850         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6851         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6852         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6853         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6854         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6855         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6856         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6857         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6858         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6859         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6860         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6861         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6862         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6863         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6864         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6865         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6866         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6867         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6868         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6869         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6870         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6871         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6872         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6873         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6874         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6875         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6876         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6877         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6878         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6879         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6880         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6881         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6882         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6883         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6884         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6885         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6886         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6887         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6888         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6889         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6890         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6891         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6892         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6893         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6894         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6895         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6896         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6897         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6898         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6899         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6900         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6901         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6902         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6903         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6904         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6905         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6906         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6907         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6908         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6909         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6910         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6911         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6912         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6913         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6914         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6915         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6916         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6917         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6918         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6919         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6920         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6921         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6922         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6923         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6924         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6925         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6926         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6927         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6928         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6929         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6930         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6931         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6932         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6933         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6934         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6935         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6936         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6937         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6938         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6939         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6940         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6941         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6942         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6943         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6944         0x00000000, 0x00000000, 0x00000000,
6945 };
6946
6947 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6948         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6949         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6950         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6951         0x00000000, 0x00000000, 0x00000000,
6952 };
6953
6954 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6955         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6956         0x00000000, 0x00000000, 0x00000000,
6957 };
6958
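/* Select the 5705-specific or the standard TSO firmware image, load it into
 * the RX or TX CPU scratch memory, and start that CPU at the firmware entry
 * point, retrying the PC write a few times before giving up.
 */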
6959 /* tp->lock is held. */
6960 static int tg3_load_tso_firmware(struct tg3 *tp)
6961 {
6962         struct fw_info info;
6963         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6964         int err, i;
6965
6966         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6967                 return 0;
6968
6969         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6970                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6971                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6972                 info.text_data = &tg3Tso5FwText[0];
6973                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6974                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6975                 info.rodata_data = &tg3Tso5FwRodata[0];
6976                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6977                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6978                 info.data_data = &tg3Tso5FwData[0];
6979                 cpu_base = RX_CPU_BASE;
6980                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6981                 cpu_scratch_size = (info.text_len +
6982                                     info.rodata_len +
6983                                     info.data_len +
6984                                     TG3_TSO5_FW_SBSS_LEN +
6985                                     TG3_TSO5_FW_BSS_LEN);
6986         } else {
6987                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6988                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6989                 info.text_data = &tg3TsoFwText[0];
6990                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6991                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6992                 info.rodata_data = &tg3TsoFwRodata[0];
6993                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6994                 info.data_len = TG3_TSO_FW_DATA_LEN;
6995                 info.data_data = &tg3TsoFwData[0];
6996                 cpu_base = TX_CPU_BASE;
6997                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6998                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6999         }
7000
7001         err = tg3_load_firmware_cpu(tp, cpu_base,
7002                                     cpu_scratch_base, cpu_scratch_size,
7003                                     &info);
7004         if (err)
7005                 return err;
7006
7007         /* Now start up the CPU. */
7008         tw32(cpu_base + CPU_STATE, 0xffffffff);
7009         tw32_f(cpu_base + CPU_PC,    info.text_base);
7010
7011         for (i = 0; i < 5; i++) {
7012                 if (tr32(cpu_base + CPU_PC) == info.text_base)
7013                         break;
7014                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7015                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7016                 tw32_f(cpu_base + CPU_PC,    info.text_base);
7017                 udelay(1000);
7018         }
7019         if (i >= 5) {
7020                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set CPU PC "
7021                        "for %s: current %08x, expected %08x\n",
7022                        tp->dev->name, tr32(cpu_base + CPU_PC),
7023                        info.text_base);
7024                 return -ENODEV;
7025         }
7026         tw32(cpu_base + CPU_STATE, 0xffffffff);
7027         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7028         return 0;
7029 }
7030
7031
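/* Validate a new MAC address, copy it into dev->dev_addr and, if the
 * interface is running, program it into the hardware, skipping MAC address
 * slot 1 when ASF management firmware is using it.
 */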
7032 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7033 {
7034         struct tg3 *tp = netdev_priv(dev);
7035         struct sockaddr *addr = p;
7036         int err = 0, skip_mac_1 = 0;
7037
7038         if (!is_valid_ether_addr(addr->sa_data))
7039                 return -EINVAL;
7040
7041         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7042
7043         if (!netif_running(dev))
7044                 return 0;
7045
7046         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7047                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7048
7049                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7050                 addr0_low = tr32(MAC_ADDR_0_LOW);
7051                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7052                 addr1_low = tr32(MAC_ADDR_1_LOW);
7053
7054                 /* Skip MAC addr 1 if ASF is using it. */
7055                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7056                     !(addr1_high == 0 && addr1_low == 0))
7057                         skip_mac_1 = 1;
7058         }
7059         spin_lock_bh(&tp->lock);
7060         __tg3_set_mac_addr(tp, skip_mac_1);
7061         spin_unlock_bh(&tp->lock);
7062
7063         return err;
7064 }
7065
7066 /* tp->lock is held. */
7067 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7068                            dma_addr_t mapping, u32 maxlen_flags,
7069                            u32 nic_addr)
7070 {
7071         tg3_write_mem(tp,
7072                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7073                       ((u64) mapping >> 32));
7074         tg3_write_mem(tp,
7075                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7076                       ((u64) mapping & 0xffffffff));
7077         tg3_write_mem(tp,
7078                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7079                        maxlen_flags);
7080
7081         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7082                 tg3_write_mem(tp,
7083                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7084                               nic_addr);
7085 }
7086
7087 static void __tg3_set_rx_mode(struct net_device *);
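/* Write the ethtool_coalesce parameters into the host coalescing engine.
 * 5705+ chips lack the per-interrupt tick and statistics-block coalescing
 * registers, so those writes are skipped on those devices.
 */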
7088 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7089 {
7090         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7091         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7092         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7093         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7094         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7095                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7096                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7097         }
7098         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7099         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7100         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7101                 u32 val = ec->stats_block_coalesce_usecs;
7102
7103                 if (!netif_carrier_ok(tp->dev))
7104                         val = 0;
7105
7106                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7107         }
7108 }
7109
7110 /* tp->lock is held. */
7111 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7112 {
7113         u32 val, rdmac_mode;
7114         int i, err, limit;
7115
7116         tg3_disable_ints(tp);
7117
7118         tg3_stop_fw(tp);
7119
7120         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7121
7122         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7123                 tg3_abort_hw(tp, 1);
7124         }
7125
7126         if (reset_phy &&
7127             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7128                 tg3_phy_reset(tp);
7129
7130         err = tg3_chip_reset(tp);
7131         if (err)
7132                 return err;
7133
7134         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7135
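        /* On 5784 AX silicon, take the CPMU out of its link-aware and
         * link-idle power modes and select the 6.25 MHz MAC clock in the
         * 10Mb link-speed, link-aware power-mode and host-access registers.
         */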
7136         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7137                 val = tr32(TG3_CPMU_CTRL);
7138                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7139                 tw32(TG3_CPMU_CTRL, val);
7140
7141                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7142                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7143                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7144                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7145
7146                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7147                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7148                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7149                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7150
7151                 val = tr32(TG3_CPMU_HST_ACC);
7152                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7153                 val |= CPMU_HST_ACC_MACCLK_6_25;
7154                 tw32(TG3_CPMU_HST_ACC, val);
7155         }
7156
7157         /* This works around an issue with Athlon chipsets on
7158          * B3 tigon3 silicon.  This bit has no effect on any
7159          * other revision.  But do not set this on PCI Express
7160          * chips and don't even touch the clocks if the CPMU is present.
7161          */
7162         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7163                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7164                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7165                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7166         }
7167
7168         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7169             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7170                 val = tr32(TG3PCI_PCISTATE);
7171                 val |= PCISTATE_RETRY_SAME_DMA;
7172                 tw32(TG3PCI_PCISTATE, val);
7173         }
7174
7175         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7176                 /* Allow reads and writes to the
7177                  * APE register and memory space.
7178                  */
7179                 val = tr32(TG3PCI_PCISTATE);
7180                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7181                        PCISTATE_ALLOW_APE_SHMEM_WR;
7182                 tw32(TG3PCI_PCISTATE, val);
7183         }
7184
7185         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7186                 /* Enable some hw fixes.  */
7187                 val = tr32(TG3PCI_MSI_DATA);
7188                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7189                 tw32(TG3PCI_MSI_DATA, val);
7190         }
7191
7192         /* Descriptor ring init may make accesses to the
7193          * NIC SRAM area to setup the TX descriptors, so we
7194          * can only do this after the hardware has been
7195          * successfully reset.
7196          */
7197         err = tg3_init_rings(tp);
7198         if (err)
7199                 return err;
7200
7201         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7202             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7203                 /* This value is determined during the probe time DMA
7204                  * engine test, tg3_test_dma.
7205                  */
7206                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7207         }
7208
7209         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7210                           GRC_MODE_4X_NIC_SEND_RINGS |
7211                           GRC_MODE_NO_TX_PHDR_CSUM |
7212                           GRC_MODE_NO_RX_PHDR_CSUM);
7213         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7214
7215         /* Pseudo-header checksum is done by hardware logic and not
7216          * the offload processors, so make the chip do the pseudo-
7217          * header checksums on receive.  For transmit it is more
7218          * convenient to do the pseudo-header checksum in software
7219          * as Linux does that on transmit for us in all cases.
7220          */
7221         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7222
7223         tw32(GRC_MODE,
7224              tp->grc_mode |
7225              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7226
7227         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
7228         val = tr32(GRC_MISC_CFG);
7229         val &= ~0xff;
7230         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7231         tw32(GRC_MISC_CFG, val);
7232
7233         /* Initialize MBUF/DESC pool. */
7234         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7235                 /* Do nothing.  */
7236         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7237                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7238                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7239                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7240                 else
7241                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7242                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7243                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7244         } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7246                 int fw_len;
7247
7248                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7249                           TG3_TSO5_FW_RODATA_LEN +
7250                           TG3_TSO5_FW_DATA_LEN +
7251                           TG3_TSO5_FW_SBSS_LEN +
7252                           TG3_TSO5_FW_BSS_LEN);
7253                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7254                 tw32(BUFMGR_MB_POOL_ADDR,
7255                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7256                 tw32(BUFMGR_MB_POOL_SIZE,
7257                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7258         }
7259
7260         if (tp->dev->mtu <= ETH_DATA_LEN) {
7261                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7262                      tp->bufmgr_config.mbuf_read_dma_low_water);
7263                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7264                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7265                 tw32(BUFMGR_MB_HIGH_WATER,
7266                      tp->bufmgr_config.mbuf_high_water);
7267         } else {
7268                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7269                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7270                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7271                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7272                 tw32(BUFMGR_MB_HIGH_WATER,
7273                      tp->bufmgr_config.mbuf_high_water_jumbo);
7274         }
7275         tw32(BUFMGR_DMA_LOW_WATER,
7276              tp->bufmgr_config.dma_low_water);
7277         tw32(BUFMGR_DMA_HIGH_WATER,
7278              tp->bufmgr_config.dma_high_water);
7279
7280         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7281         for (i = 0; i < 2000; i++) {
7282                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7283                         break;
7284                 udelay(10);
7285         }
7286         if (i >= 2000) {
7287                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7288                        tp->dev->name);
7289                 return -ENODEV;
7290         }
7291
7292         /* Setup replenish threshold. */
7293         val = tp->rx_pending / 8;
7294         if (val == 0)
7295                 val = 1;
7296         else if (val > tp->rx_std_max_post)
7297                 val = tp->rx_std_max_post;
7298         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7299                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7300                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7301
7302                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7303                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7304         }
7305
7306         tw32(RCVBDI_STD_THRESH, val);
7307
7308         /* Initialize TG3_BDINFO's at:
7309          *  RCVDBDI_STD_BD:     standard eth size rx ring
7310          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7311          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7312          *
7313          * like so:
7314          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7315          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7316          *                              ring attribute flags
7317          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7318          *
7319          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7320          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7321          *
7322          * The size of each ring is fixed in the firmware, but the location is
7323          * configurable.
7324          */
7325         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7326              ((u64) tp->rx_std_mapping >> 32));
7327         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7328              ((u64) tp->rx_std_mapping & 0xffffffff));
7329         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7330              NIC_SRAM_RX_BUFFER_DESC);
7331
7332         /* Don't even try to program the JUMBO/MINI buffer descriptor
7333          * configs on 5705.
7334          */
7335         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7336                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7337                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7338         } else {
7339                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7340                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7341
7342                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7343                      BDINFO_FLAGS_DISABLED);
7344
7345                 /* Setup replenish threshold. */
7346                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7347
7348                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7349                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7350                              ((u64) tp->rx_jumbo_mapping >> 32));
7351                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7352                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7353                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7354                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7355                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7356                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7357                 } else {
7358                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7359                              BDINFO_FLAGS_DISABLED);
7360                 }
7361
7362         }
7363
7364         /* There is only one send ring on 5705/5750, no need to explicitly
7365          * disable the others.
7366          */
7367         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7368                 /* Clear out send RCB ring in SRAM. */
7369                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7370                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7371                                       BDINFO_FLAGS_DISABLED);
7372         }
7373
7374         tp->tx_prod = 0;
7375         tp->tx_cons = 0;
7376         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7377         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7378
7379         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7380                        tp->tx_desc_mapping,
7381                        (TG3_TX_RING_SIZE <<
7382                         BDINFO_FLAGS_MAXLEN_SHIFT),
7383                        NIC_SRAM_TX_BUFFER_DESC);
7384
7385         /* There is only one receive return ring on 5705/5750, no need
7386          * to explicitly disable the others.
7387          */
7388         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7389                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7390                      i += TG3_BDINFO_SIZE) {
7391                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7392                                       BDINFO_FLAGS_DISABLED);
7393                 }
7394         }
7395
7396         tp->rx_rcb_ptr = 0;
7397         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7398
7399         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7400                        tp->rx_rcb_mapping,
7401                        (TG3_RX_RCB_RING_SIZE(tp) <<
7402                         BDINFO_FLAGS_MAXLEN_SHIFT),
7403                        0);
7404
7405         tp->rx_std_ptr = tp->rx_pending;
7406         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7407                      tp->rx_std_ptr);
7408
7409         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7410                                                 tp->rx_jumbo_pending : 0;
7411         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7412                      tp->rx_jumbo_ptr);
7413
7414         /* Initialize MAC address and backoff seed. */
7415         __tg3_set_mac_addr(tp, 0);
7416
7417         /* MTU + ethernet header + FCS + optional VLAN tag */
7418         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7419
7420         /* The slot time is changed by tg3_setup_phy if we
7421          * run at gigabit with half duplex.
7422          */
7423         tw32(MAC_TX_LENGTHS,
7424              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7425              (6 << TX_LENGTHS_IPG_SHIFT) |
7426              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7427
7428         /* Receive rules. */
7429         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7430         tw32(RCVLPC_CONFIG, 0x0181);
7431
7432         /* Calculate RDMAC_MODE setting early, we need it to determine
7433          * the RCVLPC_STATE_ENABLE mask.
7434          */
7435         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7436                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7437                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7438                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7439                       RDMAC_MODE_LNGREAD_ENAB);
7440
7441         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7442             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7443                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7444                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7445                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7446
7447         /* If statement applies to 5705 and 5750 PCI devices only */
7448         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7449              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7450             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7451                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7452                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7453                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7454                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7455                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7456                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7457                 }
7458         }
7459
7460         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7461                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7462
7463         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7464                 rdmac_mode |= (1 << 27);
7465
7466         /* Receive/send statistics. */
7467         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7468                 val = tr32(RCVLPC_STATS_ENABLE);
7469                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7470                 tw32(RCVLPC_STATS_ENABLE, val);
7471         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7472                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7473                 val = tr32(RCVLPC_STATS_ENABLE);
7474                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7475                 tw32(RCVLPC_STATS_ENABLE, val);
7476         } else {
7477                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7478         }
7479         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7480         tw32(SNDDATAI_STATSENAB, 0xffffff);
7481         tw32(SNDDATAI_STATSCTRL,
7482              (SNDDATAI_SCTRL_ENABLE |
7483               SNDDATAI_SCTRL_FASTUPD));
7484
7485         /* Setup host coalescing engine. */
7486         tw32(HOSTCC_MODE, 0);
7487         for (i = 0; i < 2000; i++) {
7488                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7489                         break;
7490                 udelay(10);
7491         }
7492
7493         __tg3_set_coalesce(tp, &tp->coal);
7494
7495         /* set status block DMA address */
7496         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7497              ((u64) tp->status_mapping >> 32));
7498         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7499              ((u64) tp->status_mapping & 0xffffffff));
7500
7501         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7502                 /* Status/statistics block address.  See tg3_timer,
7503                  * the tg3_periodic_fetch_stats call there, and
7504                  * tg3_get_stats to see how this works for 5705/5750 chips.
7505                  */
7506                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7507                      ((u64) tp->stats_mapping >> 32));
7508                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7509                      ((u64) tp->stats_mapping & 0xffffffff));
7510                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7511                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7512         }
7513
7514         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7515
7516         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7517         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7518         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7519                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7520
7521         /* Clear statistics/status block in chip, and status block in ram. */
7522         for (i = NIC_SRAM_STATS_BLK;
7523              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7524              i += sizeof(u32)) {
7525                 tg3_write_mem(tp, i, 0);
7526                 udelay(40);
7527         }
7528         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7529
7530         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7531                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7532                 /* reset to prevent losing 1st rx packet intermittently */
7533                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7534                 udelay(10);
7535         }
7536
7537         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7538                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7539         else
7540                 tp->mac_mode = 0;
7541         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7542                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7543         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7544             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7545             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7546                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7547         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7548         udelay(40);
7549
7550         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7551          * If TG3_FLG2_IS_NIC is zero, we should read the
7552          * register to preserve the GPIO settings for LOMs. The GPIOs,
7553          * whether used as inputs or outputs, are set by boot code after
7554          * reset.
7555          */
7556         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7557                 u32 gpio_mask;
7558
7559                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7560                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7561                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7562
7563                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7564                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7565                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7566
7567                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7568                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7569
7570                 tp->grc_local_ctrl &= ~gpio_mask;
7571                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7572
7573                 /* GPIO1 must be driven high for eeprom write protect */
7574                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7575                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7576                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7577         }
7578         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7579         udelay(100);
7580
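        /* Clear interrupt mailbox 0 and the cached last interrupt status tag. */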
7581         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7582         tp->last_tag = 0;
7583
7584         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7585                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7586                 udelay(40);
7587         }
7588
7589         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7590                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7591                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7592                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7593                WDMAC_MODE_LNGREAD_ENAB);
7594
7595         /* If statement applies to 5705 and 5750 PCI devices only */
7596         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7597              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7598             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7599                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7600                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7601                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7602                         /* nothing */
7603                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7604                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7605                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7606                         val |= WDMAC_MODE_RX_ACCEL;
7607                 }
7608         }
7609
7610         /* Enable host coalescing bug fix */
7611         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7612             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7613             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7614             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7615             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7616                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7617
7618         tw32_f(WDMAC_MODE, val);
7619         udelay(40);
7620
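        /* On 5703/5704 in PCI-X mode, raise the maximum memory read byte
         * count to 2K; the 5704 additionally clears its outstanding split
         * transaction limit.
         */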
7621         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7622                 u16 pcix_cmd;
7623
7624                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7625                                      &pcix_cmd);
7626                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7627                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7628                         pcix_cmd |= PCI_X_CMD_READ_2K;
7629                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7630                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7631                         pcix_cmd |= PCI_X_CMD_READ_2K;
7632                 }
7633                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7634                                       pcix_cmd);
7635         }
7636
7637         tw32_f(RDMAC_MODE, rdmac_mode);
7638         udelay(40);
7639
7640         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7641         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7642                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7643
7644         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7645                 tw32(SNDDATAC_MODE,
7646                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7647         else
7648                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7649
7650         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7651         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7652         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7653         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7654         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7655                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7656         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7657         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7658
7659         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7660                 err = tg3_load_5701_a0_firmware_fix(tp);
7661                 if (err)
7662                         return err;
7663         }
7664
7665         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7666                 err = tg3_load_tso_firmware(tp);
7667                 if (err)
7668                         return err;
7669         }
7670
7671         tp->tx_mode = TX_MODE_ENABLE;
7672         tw32_f(MAC_TX_MODE, tp->tx_mode);
7673         udelay(100);
7674
7675         tp->rx_mode = RX_MODE_ENABLE;
7676         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7677             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7678             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7679             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7680                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7681
7682         tw32_f(MAC_RX_MODE, tp->rx_mode);
7683         udelay(10);
7684
7685         tw32(MAC_LED_CTRL, tp->led_ctrl);
7686
7687         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7688         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7689                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7690                 udelay(10);
7691         }
7692         tw32_f(MAC_RX_MODE, tp->rx_mode);
7693         udelay(10);
7694
7695         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7696                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7697                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7698                         /* Set drive transmission level to 1.2V only if
7699                          * the signal pre-emphasis bit is not set.  */
7700                         val = tr32(MAC_SERDES_CFG);
7701                         val &= 0xfffff000;
7702                         val |= 0x880;
7703                         tw32(MAC_SERDES_CFG, val);
7704                 }
7705                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7706                         tw32(MAC_SERDES_CFG, 0x616000);
7707         }
7708
7709         /* Prevent chip from dropping frames when flow control
7710          * is enabled.
7711          */
7712         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7713
7714         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7715             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7716                 /* Use hardware link auto-negotiation */
7717                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7718         }
7719
7720         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7721             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7722                 u32 tmp;
7723
7724                 tmp = tr32(SERDES_RX_CTRL);
7725                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7726                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7727                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7728                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7729         }
7730
7731         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7732                 if (tp->link_config.phy_is_low_power) {
7733                         tp->link_config.phy_is_low_power = 0;
7734                         tp->link_config.speed = tp->link_config.orig_speed;
7735                         tp->link_config.duplex = tp->link_config.orig_duplex;
7736                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7737                 }
7738
7739                 err = tg3_setup_phy(tp, 0);
7740                 if (err)
7741                         return err;
7742
7743                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7744                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7745                         u32 tmp;
7746
7747                         /* Clear CRC stats. */
7748                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7749                                 tg3_writephy(tp, MII_TG3_TEST1,
7750                                              tmp | MII_TG3_TEST1_CRC_EN);
7751                                 tg3_readphy(tp, 0x14, &tmp);
7752                         }
7753                 }
7754         }
7755
7756         __tg3_set_rx_mode(tp->dev);
7757
7758         /* Initialize receive rules. */
7759         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7760         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7761         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7762         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7763
7764         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7765             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7766                 limit = 8;
7767         else
7768                 limit = 16;
7769         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7770                 limit -= 4;
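        /* Deliberate fall-through: starting at `limit', clear each unused
         * receive rule/value register pair down to rule 4.  Rules 0 and 1
         * were programmed above; rules 2 and 3 are intentionally left alone.
         */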
7771         switch (limit) {
7772         case 16:
7773                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7774         case 15:
7775                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7776         case 14:
7777                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7778         case 13:
7779                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7780         case 12:
7781                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7782         case 11:
7783                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7784         case 10:
7785                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7786         case 9:
7787                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7788         case 8:
7789                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7790         case 7:
7791                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7792         case 6:
7793                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7794         case 5:
7795                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7796         case 4:
7797                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7798         case 3:
7799                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7800         case 2:
7801         case 1:
7802
7803         default:
7804                 break;
7805         }
7806
7807         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7808                 /* Write our heartbeat update interval to APE. */
7809                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7810                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7811
7812         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7813
7814         return 0;
7815 }
7816
7817 /* Called at device open time to get the chip ready for
7818  * packet processing.  Invoked with tp->lock held.
7819  */
7820 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7821 {
7822         tg3_switch_clocks(tp);
7823
7824         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7825
7826         return tg3_reset_hw(tp, reset_phy);
7827 }
7828
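/* Accumulate a 32-bit hardware statistics register into a 64-bit
 * (high/low) software counter, carrying into the high word when the
 * low word wraps around.
 */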
7829 #define TG3_STAT_ADD32(PSTAT, REG) \
7830 do {    u32 __val = tr32(REG); \
7831         (PSTAT)->low += __val; \
7832         if ((PSTAT)->low < __val) \
7833                 (PSTAT)->high += 1; \
7834 } while (0)
7835
7836 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7837 {
7838         struct tg3_hw_stats *sp = tp->hw_stats;
7839
7840         if (!netif_carrier_ok(tp->dev))
7841                 return;
7842
7843         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7844         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7845         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7846         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7847         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7848         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7849         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7850         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7851         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7852         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7853         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7854         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7855         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7856
7857         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7858         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7859         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7860         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7861         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7862         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7863         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7864         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7865         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7866         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7867         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7868         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7869         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7870         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7871
7872         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7873         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7874         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7875 }
7876
7877 static void tg3_timer(unsigned long __opaque)
7878 {
7879         struct tg3 *tp = (struct tg3 *) __opaque;
7880
7881         if (tp->irq_sync)
7882                 goto restart_timer;
7883
7884         spin_lock(&tp->lock);
7885
7886         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7887                 /* All of this garbage is because, when using non-tagged
7888                  * IRQ status, the mailbox/status_block protocol the chip
7889                  * uses with the CPU is race prone.
7890                  */
7891                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7892                         tw32(GRC_LOCAL_CTRL,
7893                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7894                 } else {
7895                         tw32(HOSTCC_MODE, tp->coalesce_mode |
7896                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7897                 }
7898
7899                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7900                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7901                         spin_unlock(&tp->lock);
7902                         schedule_work(&tp->reset_task);
7903                         return;
7904                 }
7905         }
7906
7907         /* This part only runs once per second. */
7908         if (!--tp->timer_counter) {
7909                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7910                         tg3_periodic_fetch_stats(tp);
7911
7912                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7913                         u32 mac_stat;
7914                         int phy_event;
7915
7916                         mac_stat = tr32(MAC_STATUS);
7917
7918                         phy_event = 0;
7919                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7920                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7921                                         phy_event = 1;
7922                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7923                                 phy_event = 1;
7924
7925                         if (phy_event)
7926                                 tg3_setup_phy(tp, 0);
7927                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7928                         u32 mac_stat = tr32(MAC_STATUS);
7929                         int need_setup = 0;
7930
7931                         if (netif_carrier_ok(tp->dev) &&
7932                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7933                                 need_setup = 1;
7934                         }
7935                         if (!netif_carrier_ok(tp->dev) &&
7936                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
7937                                          MAC_STATUS_SIGNAL_DET))) {
7938                                 need_setup = 1;
7939                         }
7940                         if (need_setup) {
7941                                 if (!tp->serdes_counter) {
7942                                         tw32_f(MAC_MODE,
7943                                              (tp->mac_mode &
7944                                               ~MAC_MODE_PORT_MODE_MASK));
7945                                         udelay(40);
7946                                         tw32_f(MAC_MODE, tp->mac_mode);
7947                                         udelay(40);
7948                                 }
7949                                 tg3_setup_phy(tp, 0);
7950                         }
7951                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7952                         tg3_serdes_parallel_detect(tp);
7953
7954                 tp->timer_counter = tp->timer_multiplier;
7955         }
7956
7957         /* Heartbeat is only sent once every 2 seconds.
7958          *
7959          * The heartbeat is to tell the ASF firmware that the host
7960          * driver is still alive.  In the event that the OS crashes,
7961          * ASF needs to reset the hardware to free up the FIFO space
7962          * that may be filled with rx packets destined for the host.
7963          * If the FIFO is full, ASF will no longer function properly.
7964          *
7965          * Unintended resets have been reported on real time kernels
7966          * Unintended resets have been reported on real-time kernels
7967          * where the timer doesn't run on time.  Netpoll will also have
7968          * the same problem.
7969          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7970          * to check the ring condition when the heartbeat is expiring
7971          * before doing the reset.  This will prevent most unintended
7972          * resets.
7973          */
7974         if (!--tp->asf_counter) {
7975                 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7976                     !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7977                         tg3_wait_for_event_ack(tp);
7978
7979                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7980                                       FWCMD_NICDRV_ALIVE3);
7981                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7982                         /* 5 seconds timeout */
7983                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7984
7985                         tg3_generate_fw_event(tp);
7986                 }
7987                 tp->asf_counter = tp->asf_multiplier;
7988         }
7989
7990         spin_unlock(&tp->lock);
7991
7992 restart_timer:
7993         tp->timer.expires = jiffies + tp->timer_offset;
7994         add_timer(&tp->timer);
7995 }
7996
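/* Select the ISR variant matching the current interrupt scheme
 * (MSI, one-shot MSI, INTx, or tagged-status INTx) and register it.
 */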
7997 static int tg3_request_irq(struct tg3 *tp)
7998 {
7999         irq_handler_t fn;
8000         unsigned long flags;
8001         struct net_device *dev = tp->dev;
8002
8003         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8004                 fn = tg3_msi;
8005                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8006                         fn = tg3_msi_1shot;
8007                 flags = IRQF_SAMPLE_RANDOM;
8008         } else {
8009                 fn = tg3_interrupt;
8010                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8011                         fn = tg3_interrupt_tagged;
8012                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8013         }
8014         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
8015 }
8016
8017 static int tg3_test_interrupt(struct tg3 *tp)
8018 {
8019         struct net_device *dev = tp->dev;
8020         int err, i, intr_ok = 0;
8021
8022         if (!netif_running(dev))
8023                 return -ENODEV;
8024
8025         tg3_disable_ints(tp);
8026
8027         free_irq(tp->pdev->irq, dev);
8028
8029         err = request_irq(tp->pdev->irq, tg3_test_isr,
8030                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
8031         if (err)
8032                 return err;
8033
8034         tp->hw_status->status &= ~SD_STATUS_UPDATED;
8035         tg3_enable_ints(tp);
8036
8037         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8038                HOSTCC_MODE_NOW);
8039
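        /* Poll for up to ~50ms (5 x 10ms) for evidence that the test
         * interrupt was serviced: either a non-zero interrupt mailbox or a
         * masked PCI interrupt counts as success.
         */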
8040         for (i = 0; i < 5; i++) {
8041                 u32 int_mbox, misc_host_ctrl;
8042
8043                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
8044                                         TG3_64BIT_REG_LOW);
8045                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8046
8047                 if ((int_mbox != 0) ||
8048                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8049                         intr_ok = 1;
8050                         break;
8051                 }
8052
8053                 msleep(10);
8054         }
8055
8056         tg3_disable_ints(tp);
8057
8058         free_irq(tp->pdev->irq, dev);
8059
8060         err = tg3_request_irq(tp);
8061
8062         if (err)
8063                 return err;
8064
8065         if (intr_ok)
8066                 return 0;
8067
8068         return -EIO;
8069 }
8070
8071 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
8072  * INTx mode is successfully restored.
8073  */
8074 static int tg3_test_msi(struct tg3 *tp)
8075 {
8076         struct net_device *dev = tp->dev;
8077         int err;
8078         u16 pci_cmd;
8079
8080         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8081                 return 0;
8082
8083         /* Turn off SERR reporting in case MSI terminates with Master
8084          * Abort.
8085          */
8086         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8087         pci_write_config_word(tp->pdev, PCI_COMMAND,
8088                               pci_cmd & ~PCI_COMMAND_SERR);
8089
8090         err = tg3_test_interrupt(tp);
8091
8092         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8093
8094         if (!err)
8095                 return 0;
8096
8097         /* other failures */
8098         if (err != -EIO)
8099                 return err;
8100
8101         /* MSI test failed, go back to INTx mode */
8102         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8103                "switching to INTx mode. Please report this failure to "
8104                "the PCI maintainer and include system chipset information.\n",
8105                        tp->dev->name);
8106
8107         free_irq(tp->pdev->irq, dev);
8108         pci_disable_msi(tp->pdev);
8109
8110         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8111
8112         err = tg3_request_irq(tp);
8113         if (err)
8114                 return err;
8115
8116         /* Need to reset the chip because the MSI cycle may have terminated
8117          * with Master Abort.
8118          */
8119         tg3_full_lock(tp, 1);
8120
8121         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8122         err = tg3_init_hw(tp, 1);
8123
8124         tg3_full_unlock(tp);
8125
8126         if (err)
8127                 free_irq(tp->pdev->irq, dev);
8128
8129         return err;
8130 }
8131
8132 static int tg3_open(struct net_device *dev)
8133 {
8134         struct tg3 *tp = netdev_priv(dev);
8135         int err;
8136
8137         netif_carrier_off(tp->dev);
8138
8139         err = tg3_set_power_state(tp, PCI_D0);
8140         if (err)
8141                 return err;
8142
8143         tg3_full_lock(tp, 0);
8144
8145         tg3_disable_ints(tp);
8146         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8147
8148         tg3_full_unlock(tp);
8149
8150         /* The placement of this call is tied
8151          * to the setup and use of Host TX descriptors.
8152          */
8153         err = tg3_alloc_consistent(tp);
8154         if (err)
8155                 return err;
8156
8157         if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
8158                 /* All MSI supporting chips should support tagged
8159                  * status.  Assert that this is the case.
8160                  */
8161                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8162                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8163                                "Not using MSI.\n", tp->dev->name);
8164                 } else if (pci_enable_msi(tp->pdev) == 0) {
8165                         u32 msi_mode;
8166
8167                         msi_mode = tr32(MSGINT_MODE);
8168                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8169                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8170                 }
8171         }
8172         err = tg3_request_irq(tp);
8173
8174         if (err) {
8175                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8176                         pci_disable_msi(tp->pdev);
8177                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8178                 }
8179                 tg3_free_consistent(tp);
8180                 return err;
8181         }
8182
8183         napi_enable(&tp->napi);
8184
8185         tg3_full_lock(tp, 0);
8186
8187         err = tg3_init_hw(tp, 1);
8188         if (err) {
8189                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8190                 tg3_free_rings(tp);
8191         } else {
8192                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8193                         tp->timer_offset = HZ;
8194                 else
8195                         tp->timer_offset = HZ / 10;
8196
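                /* The timer fires every timer_offset jiffies;
                 * timer_multiplier scales that back to the once-per-second
                 * work in tg3_timer(), and the ASF heartbeat runs at half
                 * that rate (every 2 seconds).
                 */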
8197                 BUG_ON(tp->timer_offset > HZ);
8198                 tp->timer_counter = tp->timer_multiplier =
8199                         (HZ / tp->timer_offset);
8200                 tp->asf_counter = tp->asf_multiplier =
8201                         ((HZ / tp->timer_offset) * 2);
8202
8203                 init_timer(&tp->timer);
8204                 tp->timer.expires = jiffies + tp->timer_offset;
8205                 tp->timer.data = (unsigned long) tp;
8206                 tp->timer.function = tg3_timer;
8207         }
8208
8209         tg3_full_unlock(tp);
8210
8211         if (err) {
8212                 napi_disable(&tp->napi);
8213                 free_irq(tp->pdev->irq, dev);
8214                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8215                         pci_disable_msi(tp->pdev);
8216                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8217                 }
8218                 tg3_free_consistent(tp);
8219                 return err;
8220         }
8221
8222         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8223                 err = tg3_test_msi(tp);
8224
8225                 if (err) {
8226                         tg3_full_lock(tp, 0);
8227
8228                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8229                                 pci_disable_msi(tp->pdev);
8230                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8231                         }
8232                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8233                         tg3_free_rings(tp);
8234                         tg3_free_consistent(tp);
8235
8236                         tg3_full_unlock(tp);
8237
8238                         napi_disable(&tp->napi);
8239
8240                         return err;
8241                 }
8242
8243                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8244                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8245                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
8246
8247                                 tw32(PCIE_TRANSACTION_CFG,
8248                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
8249                         }
8250                 }
8251         }
8252
8253         tg3_phy_start(tp);
8254
8255         tg3_full_lock(tp, 0);
8256
8257         add_timer(&tp->timer);
8258         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8259         tg3_enable_ints(tp);
8260
8261         tg3_full_unlock(tp);
8262
8263         netif_start_queue(dev);
8264
8265         return 0;
8266 }
8267
8268 #if 0
8269 /*static*/ void tg3_dump_state(struct tg3 *tp)
8270 {
8271         u32 val32, val32_2, val32_3, val32_4, val32_5;
8272         u16 val16;
8273         int i;
8274
8275         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8276         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8277         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8278                val16, val32);
8279
8280         /* MAC block */
8281         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8282                tr32(MAC_MODE), tr32(MAC_STATUS));
8283         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8284                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8285         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8286                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8287         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8288                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8289
8290         /* Send data initiator control block */
8291         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8292                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8293         printk("       SNDDATAI_STATSCTRL[%08x]\n",
8294                tr32(SNDDATAI_STATSCTRL));
8295
8296         /* Send data completion control block */
8297         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8298
8299         /* Send BD ring selector block */
8300         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8301                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8302
8303         /* Send BD initiator control block */
8304         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8305                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8306
8307         /* Send BD completion control block */
8308         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8309
8310         /* Receive list placement control block */
8311         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8312                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8313         printk("       RCVLPC_STATSCTRL[%08x]\n",
8314                tr32(RCVLPC_STATSCTRL));
8315
8316         /* Receive data and receive BD initiator control block */
8317         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8318                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8319
8320         /* Receive data completion control block */
8321         printk("DEBUG: RCVDCC_MODE[%08x]\n",
8322                tr32(RCVDCC_MODE));
8323
8324         /* Receive BD initiator control block */
8325         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8326                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8327
8328         /* Receive BD completion control block */
8329         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8330                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8331
8332         /* Receive list selector control block */
8333         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8334                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8335
8336         /* Mbuf cluster free block */
8337         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8338                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8339
8340         /* Host coalescing control block */
8341         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8342                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8343         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8344                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8345                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8346         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8347                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8348                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8349         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8350                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8351         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8352                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8353
8354         /* Memory arbiter control block */
8355         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8356                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8357
8358         /* Buffer manager control block */
8359         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8360                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8361         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8362                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8363         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8364                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8365                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8366                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8367
8368         /* Read DMA control block */
8369         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8370                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8371
8372         /* Write DMA control block */
8373         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8374                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8375
8376         /* DMA completion block */
8377         printk("DEBUG: DMAC_MODE[%08x]\n",
8378                tr32(DMAC_MODE));
8379
8380         /* GRC block */
8381         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8382                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8383         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8384                tr32(GRC_LOCAL_CTRL));
8385
8386         /* TG3_BDINFOs */
8387         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8388                tr32(RCVDBDI_JUMBO_BD + 0x0),
8389                tr32(RCVDBDI_JUMBO_BD + 0x4),
8390                tr32(RCVDBDI_JUMBO_BD + 0x8),
8391                tr32(RCVDBDI_JUMBO_BD + 0xc));
8392         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8393                tr32(RCVDBDI_STD_BD + 0x0),
8394                tr32(RCVDBDI_STD_BD + 0x4),
8395                tr32(RCVDBDI_STD_BD + 0x8),
8396                tr32(RCVDBDI_STD_BD + 0xc));
8397         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8398                tr32(RCVDBDI_MINI_BD + 0x0),
8399                tr32(RCVDBDI_MINI_BD + 0x4),
8400                tr32(RCVDBDI_MINI_BD + 0x8),
8401                tr32(RCVDBDI_MINI_BD + 0xc));
8402
8403         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8404         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8405         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8406         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8407         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8408                val32, val32_2, val32_3, val32_4);
8409
8410         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8411         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8412         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8413         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8414         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8415                val32, val32_2, val32_3, val32_4);
8416
8417         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8418         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8419         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8420         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8421         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8422         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8423                val32, val32_2, val32_3, val32_4, val32_5);
8424
8425         /* SW status block */
8426         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8427                tp->hw_status->status,
8428                tp->hw_status->status_tag,
8429                tp->hw_status->rx_jumbo_consumer,
8430                tp->hw_status->rx_consumer,
8431                tp->hw_status->rx_mini_consumer,
8432                tp->hw_status->idx[0].rx_producer,
8433                tp->hw_status->idx[0].tx_consumer);
8434
8435         /* SW statistics block */
8436         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8437                ((u32 *)tp->hw_stats)[0],
8438                ((u32 *)tp->hw_stats)[1],
8439                ((u32 *)tp->hw_stats)[2],
8440                ((u32 *)tp->hw_stats)[3]);
8441
8442         /* Mailboxes */
8443         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8444                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8445                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8446                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8447                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8448
8449         /* NIC side send descriptors. */
8450         for (i = 0; i < 6; i++) {
8451                 unsigned long txd;
8452
8453                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8454                         + (i * sizeof(struct tg3_tx_buffer_desc));
8455                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8456                        i,
8457                        readl(txd + 0x0), readl(txd + 0x4),
8458                        readl(txd + 0x8), readl(txd + 0xc));
8459         }
8460
8461         /* NIC side RX descriptors. */
8462         for (i = 0; i < 6; i++) {
8463                 unsigned long rxd;
8464
8465                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8466                         + (i * sizeof(struct tg3_rx_buffer_desc));
8467                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8468                        i,
8469                        readl(rxd + 0x0), readl(rxd + 0x4),
8470                        readl(rxd + 0x8), readl(rxd + 0xc));
8471                 rxd += (4 * sizeof(u32));
8472                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8473                        i,
8474                        readl(rxd + 0x0), readl(rxd + 0x4),
8475                        readl(rxd + 0x8), readl(rxd + 0xc));
8476         }
8477
8478         for (i = 0; i < 6; i++) {
8479                 unsigned long rxd;
8480
8481                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8482                         + (i * sizeof(struct tg3_rx_buffer_desc));
8483                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8484                        i,
8485                        readl(rxd + 0x0), readl(rxd + 0x4),
8486                        readl(rxd + 0x8), readl(rxd + 0xc));
8487                 rxd += (4 * sizeof(u32));
8488                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8489                        i,
8490                        readl(rxd + 0x0), readl(rxd + 0x4),
8491                        readl(rxd + 0x8), readl(rxd + 0xc));
8492         }
8493 }
8494 #endif
8495
8496 static struct net_device_stats *tg3_get_stats(struct net_device *);
8497 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8498
8499 static int tg3_close(struct net_device *dev)
8500 {
8501         struct tg3 *tp = netdev_priv(dev);
8502
8503         napi_disable(&tp->napi);
8504         cancel_work_sync(&tp->reset_task);
8505
8506         netif_stop_queue(dev);
8507
8508         del_timer_sync(&tp->timer);
8509
8510         tg3_full_lock(tp, 1);
8511 #if 0
8512         tg3_dump_state(tp);
8513 #endif
8514
8515         tg3_disable_ints(tp);
8516
8517         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8518         tg3_free_rings(tp);
8519         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8520
8521         tg3_full_unlock(tp);
8522
8523         free_irq(tp->pdev->irq, dev);
8524         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8525                 pci_disable_msi(tp->pdev);
8526                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8527         }
8528
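        /* Snapshot the cumulative counters before hw_stats goes away below;
         * tg3_get_stats()/tg3_get_estats() fold these snapshots back in.
         */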
8529         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8530                sizeof(tp->net_stats_prev));
8531         memcpy(&tp->estats_prev, tg3_get_estats(tp),
8532                sizeof(tp->estats_prev));
8533
8534         tg3_free_consistent(tp);
8535
8536         tg3_set_power_state(tp, PCI_D3hot);
8537
8538         netif_carrier_off(tp->dev);
8539
8540         return 0;
8541 }
8542
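/* Hardware counters are kept as 64-bit high/low pairs.  get_stat64()
 * returns what fits in an unsigned long (only the low word on 32-bit
 * hosts), while get_estat64() always returns the full 64-bit value for
 * the ethtool statistics.
 */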
8543 static inline unsigned long get_stat64(tg3_stat64_t *val)
8544 {
8545         unsigned long ret;
8546
8547 #if (BITS_PER_LONG == 32)
8548         ret = val->low;
8549 #else
8550         ret = ((u64)val->high << 32) | ((u64)val->low);
8551 #endif
8552         return ret;
8553 }
8554
8555 static inline u64 get_estat64(tg3_stat64_t *val)
8556 {
8557        return ((u64)val->high << 32) | ((u64)val->low);
8558 }
8559
8560 static unsigned long calc_crc_errors(struct tg3 *tp)
8561 {
8562         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8563
8564         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8565             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8566              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8567                 u32 val;
8568
8569                 spin_lock_bh(&tp->lock);
8570                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8571                         tg3_writephy(tp, MII_TG3_TEST1,
8572                                      val | MII_TG3_TEST1_CRC_EN);
8573                         tg3_readphy(tp, 0x14, &val);
8574                 } else
8575                         val = 0;
8576                 spin_unlock_bh(&tp->lock);
8577
8578                 tp->phy_crc_errors += val;
8579
8580                 return tp->phy_crc_errors;
8581         }
8582
8583         return get_stat64(&hw_stats->rx_fcs_errors);
8584 }
8585
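/* Each ethtool statistic is the snapshot saved at the last close
 * (estats_prev) plus the live hardware counter.
 */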
8586 #define ESTAT_ADD(member) \
8587         estats->member =        old_estats->member + \
8588                                 get_estat64(&hw_stats->member)
8589
8590 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8591 {
8592         struct tg3_ethtool_stats *estats = &tp->estats;
8593         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8594         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8595
8596         if (!hw_stats)
8597                 return old_estats;
8598
8599         ESTAT_ADD(rx_octets);
8600         ESTAT_ADD(rx_fragments);
8601         ESTAT_ADD(rx_ucast_packets);
8602         ESTAT_ADD(rx_mcast_packets);
8603         ESTAT_ADD(rx_bcast_packets);
8604         ESTAT_ADD(rx_fcs_errors);
8605         ESTAT_ADD(rx_align_errors);
8606         ESTAT_ADD(rx_xon_pause_rcvd);
8607         ESTAT_ADD(rx_xoff_pause_rcvd);
8608         ESTAT_ADD(rx_mac_ctrl_rcvd);
8609         ESTAT_ADD(rx_xoff_entered);
8610         ESTAT_ADD(rx_frame_too_long_errors);
8611         ESTAT_ADD(rx_jabbers);
8612         ESTAT_ADD(rx_undersize_packets);
8613         ESTAT_ADD(rx_in_length_errors);
8614         ESTAT_ADD(rx_out_length_errors);
8615         ESTAT_ADD(rx_64_or_less_octet_packets);
8616         ESTAT_ADD(rx_65_to_127_octet_packets);
8617         ESTAT_ADD(rx_128_to_255_octet_packets);
8618         ESTAT_ADD(rx_256_to_511_octet_packets);
8619         ESTAT_ADD(rx_512_to_1023_octet_packets);
8620         ESTAT_ADD(rx_1024_to_1522_octet_packets);
8621         ESTAT_ADD(rx_1523_to_2047_octet_packets);
8622         ESTAT_ADD(rx_2048_to_4095_octet_packets);
8623         ESTAT_ADD(rx_4096_to_8191_octet_packets);
8624         ESTAT_ADD(rx_8192_to_9022_octet_packets);
8625
8626         ESTAT_ADD(tx_octets);
8627         ESTAT_ADD(tx_collisions);
8628         ESTAT_ADD(tx_xon_sent);
8629         ESTAT_ADD(tx_xoff_sent);
8630         ESTAT_ADD(tx_flow_control);
8631         ESTAT_ADD(tx_mac_errors);
8632         ESTAT_ADD(tx_single_collisions);
8633         ESTAT_ADD(tx_mult_collisions);
8634         ESTAT_ADD(tx_deferred);
8635         ESTAT_ADD(tx_excessive_collisions);
8636         ESTAT_ADD(tx_late_collisions);
8637         ESTAT_ADD(tx_collide_2times);
8638         ESTAT_ADD(tx_collide_3times);
8639         ESTAT_ADD(tx_collide_4times);
8640         ESTAT_ADD(tx_collide_5times);
8641         ESTAT_ADD(tx_collide_6times);
8642         ESTAT_ADD(tx_collide_7times);
8643         ESTAT_ADD(tx_collide_8times);
8644         ESTAT_ADD(tx_collide_9times);
8645         ESTAT_ADD(tx_collide_10times);
8646         ESTAT_ADD(tx_collide_11times);
8647         ESTAT_ADD(tx_collide_12times);
8648         ESTAT_ADD(tx_collide_13times);
8649         ESTAT_ADD(tx_collide_14times);
8650         ESTAT_ADD(tx_collide_15times);
8651         ESTAT_ADD(tx_ucast_packets);
8652         ESTAT_ADD(tx_mcast_packets);
8653         ESTAT_ADD(tx_bcast_packets);
8654         ESTAT_ADD(tx_carrier_sense_errors);
8655         ESTAT_ADD(tx_discards);
8656         ESTAT_ADD(tx_errors);
8657
8658         ESTAT_ADD(dma_writeq_full);
8659         ESTAT_ADD(dma_write_prioq_full);
8660         ESTAT_ADD(rxbds_empty);
8661         ESTAT_ADD(rx_discards);
8662         ESTAT_ADD(rx_errors);
8663         ESTAT_ADD(rx_threshold_hit);
8664
8665         ESTAT_ADD(dma_readq_full);
8666         ESTAT_ADD(dma_read_prioq_full);
8667         ESTAT_ADD(tx_comp_queue_full);
8668
8669         ESTAT_ADD(ring_set_send_prod_index);
8670         ESTAT_ADD(ring_status_update);
8671         ESTAT_ADD(nic_irqs);
8672         ESTAT_ADD(nic_avoided_irqs);
8673         ESTAT_ADD(nic_tx_threshold_hit);
8674
8675         return estats;
8676 }
8677
8678 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8679 {
8680         struct tg3 *tp = netdev_priv(dev);
8681         struct net_device_stats *stats = &tp->net_stats;
8682         struct net_device_stats *old_stats = &tp->net_stats_prev;
8683         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8684
8685         if (!hw_stats)
8686                 return old_stats;
8687
8688         stats->rx_packets = old_stats->rx_packets +
8689                 get_stat64(&hw_stats->rx_ucast_packets) +
8690                 get_stat64(&hw_stats->rx_mcast_packets) +
8691                 get_stat64(&hw_stats->rx_bcast_packets);
8692
8693         stats->tx_packets = old_stats->tx_packets +
8694                 get_stat64(&hw_stats->tx_ucast_packets) +
8695                 get_stat64(&hw_stats->tx_mcast_packets) +
8696                 get_stat64(&hw_stats->tx_bcast_packets);
8697
8698         stats->rx_bytes = old_stats->rx_bytes +
8699                 get_stat64(&hw_stats->rx_octets);
8700         stats->tx_bytes = old_stats->tx_bytes +
8701                 get_stat64(&hw_stats->tx_octets);
8702
8703         stats->rx_errors = old_stats->rx_errors +
8704                 get_stat64(&hw_stats->rx_errors);
8705         stats->tx_errors = old_stats->tx_errors +
8706                 get_stat64(&hw_stats->tx_errors) +
8707                 get_stat64(&hw_stats->tx_mac_errors) +
8708                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8709                 get_stat64(&hw_stats->tx_discards);
8710
8711         stats->multicast = old_stats->multicast +
8712                 get_stat64(&hw_stats->rx_mcast_packets);
8713         stats->collisions = old_stats->collisions +
8714                 get_stat64(&hw_stats->tx_collisions);
8715
8716         stats->rx_length_errors = old_stats->rx_length_errors +
8717                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8718                 get_stat64(&hw_stats->rx_undersize_packets);
8719
8720         stats->rx_over_errors = old_stats->rx_over_errors +
8721                 get_stat64(&hw_stats->rxbds_empty);
8722         stats->rx_frame_errors = old_stats->rx_frame_errors +
8723                 get_stat64(&hw_stats->rx_align_errors);
8724         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8725                 get_stat64(&hw_stats->tx_discards);
8726         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8727                 get_stat64(&hw_stats->tx_carrier_sense_errors);
8728
8729         stats->rx_crc_errors = old_stats->rx_crc_errors +
8730                 calc_crc_errors(tp);
8731
8732         stats->rx_missed_errors = old_stats->rx_missed_errors +
8733                 get_stat64(&hw_stats->rx_discards);
8734
8735         return stats;
8736 }
8737
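/* Bitwise CRC-32 over the buffer using the reflected IEEE 802.3
 * polynomial (0xedb88320); __tg3_set_rx_mode() uses the result to select
 * a bit in the 128-bit multicast hash filter.
 */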
8738 static inline u32 calc_crc(unsigned char *buf, int len)
8739 {
8740         u32 reg;
8741         u32 tmp;
8742         int j, k;
8743
8744         reg = 0xffffffff;
8745
8746         for (j = 0; j < len; j++) {
8747                 reg ^= buf[j];
8748
8749                 for (k = 0; k < 8; k++) {
8750                         tmp = reg & 0x01;
8751
8752                         reg >>= 1;
8753
8754                         if (tmp) {
8755                                 reg ^= 0xedb88320;
8756                         }
8757                 }
8758         }
8759
8760         return ~reg;
8761 }
8762
8763 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8764 {
8765         /* accept or reject all multicast frames */
8766         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8767         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8768         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8769         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8770 }
8771
8772 static void __tg3_set_rx_mode(struct net_device *dev)
8773 {
8774         struct tg3 *tp = netdev_priv(dev);
8775         u32 rx_mode;
8776
8777         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8778                                   RX_MODE_KEEP_VLAN_TAG);
8779
8780         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8781          * flag clear.
8782          */
8783 #if TG3_VLAN_TAG_USED
8784         if (!tp->vlgrp &&
8785             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8786                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8787 #else
8788         /* By definition, VLAN is always disabled in this
8789          * case.
8790          */
8791         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8792                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8793 #endif
8794
8795         if (dev->flags & IFF_PROMISC) {
8796                 /* Promiscuous mode. */
8797                 rx_mode |= RX_MODE_PROMISC;
8798         } else if (dev->flags & IFF_ALLMULTI) {
8799                 /* Accept all multicast. */
8800                 tg3_set_multi(tp, 1);
8801         } else if (dev->mc_count < 1) {
8802                 /* Reject all multicast. */
8803                 tg3_set_multi(tp, 0);
8804         } else {
8805                 /* Accept one or more multicast(s). */
8806                 struct dev_mc_list *mclist;
8807                 unsigned int i;
8808                 u32 mc_filter[4] = { 0, };
8809                 u32 regidx;
8810                 u32 bit;
8811                 u32 crc;
8812
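                /* Hash each multicast address into the 128-bit filter:
                 * seven bits of the inverted CRC pick one of 128 bits,
                 * spread across the four 32-bit MAC_HASH registers.
                 */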
8813                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8814                      i++, mclist = mclist->next) {
8815
8816                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8817                         bit = ~crc & 0x7f;
8818                         regidx = (bit & 0x60) >> 5;
8819                         bit &= 0x1f;
8820                         mc_filter[regidx] |= (1 << bit);
8821                 }
8822
8823                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8824                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8825                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8826                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8827         }
8828
8829         if (rx_mode != tp->rx_mode) {
8830                 tp->rx_mode = rx_mode;
8831                 tw32_f(MAC_RX_MODE, rx_mode);
8832                 udelay(10);
8833         }
8834 }
8835
8836 static void tg3_set_rx_mode(struct net_device *dev)
8837 {
8838         struct tg3 *tp = netdev_priv(dev);
8839
8840         if (!netif_running(dev))
8841                 return;
8842
8843         tg3_full_lock(tp, 0);
8844         __tg3_set_rx_mode(dev);
8845         tg3_full_unlock(tp);
8846 }
8847
8848 #define TG3_REGDUMP_LEN         (32 * 1024)
8849
8850 static int tg3_get_regs_len(struct net_device *dev)
8851 {
8852         return TG3_REGDUMP_LEN;
8853 }
8854
8855 static void tg3_get_regs(struct net_device *dev,
8856                 struct ethtool_regs *regs, void *_p)
8857 {
8858         u32 *p = _p;
8859         struct tg3 *tp = netdev_priv(dev);
8860         u8 *orig_p = _p;
8861         int i;
8862
8863         regs->version = 0;
8864
8865         memset(p, 0, TG3_REGDUMP_LEN);
8866
8867         if (tp->link_config.phy_is_low_power)
8868                 return;
8869
8870         tg3_full_lock(tp, 0);
8871
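/* Copy registers into the user buffer at the same offsets they occupy in
 * the 32K register window; ranges that are skipped stay zero from the
 * memset() above.
 */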
8872 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
8873 #define GET_REG32_LOOP(base,len)                \
8874 do {    p = (u32 *)(orig_p + (base));           \
8875         for (i = 0; i < len; i += 4)            \
8876                 __GET_REG32((base) + i);        \
8877 } while (0)
8878 #define GET_REG32_1(reg)                        \
8879 do {    p = (u32 *)(orig_p + (reg));            \
8880         __GET_REG32((reg));                     \
8881 } while (0)
8882
8883         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8884         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8885         GET_REG32_LOOP(MAC_MODE, 0x4f0);
8886         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8887         GET_REG32_1(SNDDATAC_MODE);
8888         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8889         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8890         GET_REG32_1(SNDBDC_MODE);
8891         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8892         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8893         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8894         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8895         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8896         GET_REG32_1(RCVDCC_MODE);
8897         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8898         GET_REG32_LOOP(RCVCC_MODE, 0x14);
8899         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8900         GET_REG32_1(MBFREE_MODE);
8901         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8902         GET_REG32_LOOP(MEMARB_MODE, 0x10);
8903         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8904         GET_REG32_LOOP(RDMAC_MODE, 0x08);
8905         GET_REG32_LOOP(WDMAC_MODE, 0x08);
8906         GET_REG32_1(RX_CPU_MODE);
8907         GET_REG32_1(RX_CPU_STATE);
8908         GET_REG32_1(RX_CPU_PGMCTR);
8909         GET_REG32_1(RX_CPU_HWBKPT);
8910         GET_REG32_1(TX_CPU_MODE);
8911         GET_REG32_1(TX_CPU_STATE);
8912         GET_REG32_1(TX_CPU_PGMCTR);
8913         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8914         GET_REG32_LOOP(FTQ_RESET, 0x120);
8915         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8916         GET_REG32_1(DMAC_MODE);
8917         GET_REG32_LOOP(GRC_MODE, 0x4c);
8918         if (tp->tg3_flags & TG3_FLAG_NVRAM)
8919                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8920
8921 #undef __GET_REG32
8922 #undef GET_REG32_LOOP
8923 #undef GET_REG32_1
8924
8925         tg3_full_unlock(tp);
8926 }
8927
8928 static int tg3_get_eeprom_len(struct net_device *dev)
8929 {
8930         struct tg3 *tp = netdev_priv(dev);
8931
8932         return tp->nvram_size;
8933 }
8934
8935 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8936 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8937 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8938
8939 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8940 {
8941         struct tg3 *tp = netdev_priv(dev);
8942         int ret;
8943         u8  *pd;
8944         u32 i, offset, len, b_offset, b_count;
8945         __le32 val;
8946
8947         if (tp->link_config.phy_is_low_power)
8948                 return -EAGAIN;
8949
8950         offset = eeprom->offset;
8951         len = eeprom->len;
8952         eeprom->len = 0;
8953
8954         eeprom->magic = TG3_EEPROM_MAGIC;
8955
8956         if (offset & 3) {
8957                 /* adjustments to start on required 4 byte boundary */
8958                 b_offset = offset & 3;
8959                 b_count = 4 - b_offset;
8960                 if (b_count > len) {
8961                         /* i.e. offset=1 len=2 */
8962                         b_count = len;
8963                 }
8964                 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8965                 if (ret)
8966                         return ret;
8967                 memcpy(data, ((char*)&val) + b_offset, b_count);
8968                 len -= b_count;
8969                 offset += b_count;
8970                 eeprom->len += b_count;
8971         }
8972
8973         /* read bytes up to the last 4 byte boundary */
8974         pd = &data[eeprom->len];
8975         for (i = 0; i < (len - (len & 3)); i += 4) {
8976                 ret = tg3_nvram_read_le(tp, offset + i, &val);
8977                 if (ret) {
8978                         eeprom->len += i;
8979                         return ret;
8980                 }
8981                 memcpy(pd + i, &val, 4);
8982         }
8983         eeprom->len += i;
8984
8985         if (len & 3) {
8986                 /* read last bytes not ending on 4 byte boundary */
8987                 pd = &data[eeprom->len];
8988                 b_count = len & 3;
8989                 b_offset = offset + len - b_count;
8990                 ret = tg3_nvram_read_le(tp, b_offset, &val);
8991                 if (ret)
8992                         return ret;
8993                 memcpy(pd, &val, b_count);
8994                 eeprom->len += b_count;
8995         }
8996         return 0;
8997 }
8998
8999 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9000
9001 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9002 {
9003         struct tg3 *tp = netdev_priv(dev);
9004         int ret;
9005         u32 offset, len, b_offset, odd_len;
9006         u8 *buf;
9007         __le32 start, end;
9008
9009         if (tp->link_config.phy_is_low_power)
9010                 return -EAGAIN;
9011
9012         if (eeprom->magic != TG3_EEPROM_MAGIC)
9013                 return -EINVAL;
9014
9015         offset = eeprom->offset;
9016         len = eeprom->len;
9017
9018         if ((b_offset = (offset & 3))) {
9019                 /* adjustments to start on required 4 byte boundary */
9020                 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
9021                 if (ret)
9022                         return ret;
9023                 len += b_offset;
9024                 offset &= ~3;
9025                 if (len < 4)
9026                         len = 4;
9027         }
9028
9029         odd_len = 0;
9030         if (len & 3) {
9031                 /* adjustments to end on required 4 byte boundary */
9032                 odd_len = 1;
9033                 len = (len + 3) & ~3;
9034                 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
9035                 if (ret)
9036                         return ret;
9037         }
9038
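        /* NVRAM writes must cover whole, 4-byte-aligned words.  If the
         * request is unaligned at either end, merge it into a temporary
         * buffer padded with the existing NVRAM contents read above.
         */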
9039         buf = data;
9040         if (b_offset || odd_len) {
9041                 buf = kmalloc(len, GFP_KERNEL);
9042                 if (!buf)
9043                         return -ENOMEM;
9044                 if (b_offset)
9045                         memcpy(buf, &start, 4);
9046                 if (odd_len)
9047                         memcpy(buf+len-4, &end, 4);
9048                 memcpy(buf + b_offset, data, eeprom->len);
9049         }
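             /* At this point buf covers [offset, offset + len) in whole
              * 32-bit words: the pre-read start/end words patch any
              * unaligned head or tail, with the caller's bytes copied
              * in between.
              */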
9050
9051         ret = tg3_nvram_write_block(tp, offset, len, buf);
9052
9053         if (buf != data)
9054                 kfree(buf);
9055
9056         return ret;
9057 }
9058
9059 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9060 {
9061         struct tg3 *tp = netdev_priv(dev);
9062
9063         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9064                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9065                         return -EAGAIN;
9066                 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9067         }
9068
9069         cmd->supported = (SUPPORTED_Autoneg);
9070
9071         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9072                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9073                                    SUPPORTED_1000baseT_Full);
9074
9075         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9076                 cmd->supported |= (SUPPORTED_100baseT_Half |
9077                                   SUPPORTED_100baseT_Full |
9078                                   SUPPORTED_10baseT_Half |
9079                                   SUPPORTED_10baseT_Full |
9080                                   SUPPORTED_TP);
9081                 cmd->port = PORT_TP;
9082         } else {
9083                 cmd->supported |= SUPPORTED_FIBRE;
9084                 cmd->port = PORT_FIBRE;
9085         }
9086
9087         cmd->advertising = tp->link_config.advertising;
9088         if (netif_running(dev)) {
9089                 cmd->speed = tp->link_config.active_speed;
9090                 cmd->duplex = tp->link_config.active_duplex;
9091         }
9092         cmd->phy_address = PHY_ADDR;
9093         cmd->transceiver = 0;
9094         cmd->autoneg = tp->link_config.autoneg;
9095         cmd->maxtxpkt = 0;
9096         cmd->maxrxpkt = 0;
9097         return 0;
9098 }
9099
9100 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9101 {
9102         struct tg3 *tp = netdev_priv(dev);
9103
9104         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9105                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9106                         return -EAGAIN;
9107                 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9108         }
9109
9110         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9111                 /* These are the only valid advertisement bits allowed.  */
9112                 if (cmd->autoneg == AUTONEG_ENABLE &&
9113                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9114                                           ADVERTISED_1000baseT_Full |
9115                                           ADVERTISED_Autoneg |
9116                                           ADVERTISED_FIBRE)))
9117                         return -EINVAL;
9118                 /* Fiber can only do SPEED_1000.  */
9119                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9120                          (cmd->speed != SPEED_1000))
9121                         return -EINVAL;
9122         /* Copper cannot force SPEED_1000.  */
9123         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9124                    (cmd->speed == SPEED_1000))
9125                 return -EINVAL;
9126         else if ((cmd->speed == SPEED_1000) &&
9127                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9128                 return -EINVAL;
9129
9130         tg3_full_lock(tp, 0);
9131
9132         tp->link_config.autoneg = cmd->autoneg;
9133         if (cmd->autoneg == AUTONEG_ENABLE) {
9134                 tp->link_config.advertising = (cmd->advertising |
9135                                               ADVERTISED_Autoneg);
9136                 tp->link_config.speed = SPEED_INVALID;
9137                 tp->link_config.duplex = DUPLEX_INVALID;
9138         } else {
9139                 tp->link_config.advertising = 0;
9140                 tp->link_config.speed = cmd->speed;
9141                 tp->link_config.duplex = cmd->duplex;
9142         }
9143
9144         tp->link_config.orig_speed = tp->link_config.speed;
9145         tp->link_config.orig_duplex = tp->link_config.duplex;
9146         tp->link_config.orig_autoneg = tp->link_config.autoneg;
9147
9148         if (netif_running(dev))
9149                 tg3_setup_phy(tp, 1);
9150
9151         tg3_full_unlock(tp);
9152
9153         return 0;
9154 }
9155
9156 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9157 {
9158         struct tg3 *tp = netdev_priv(dev);
9159
9160         strcpy(info->driver, DRV_MODULE_NAME);
9161         strcpy(info->version, DRV_MODULE_VERSION);
9162         strcpy(info->fw_version, tp->fw_ver);
9163         strcpy(info->bus_info, pci_name(tp->pdev));
9164 }
9165
9166 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9167 {
9168         struct tg3 *tp = netdev_priv(dev);
9169
9170         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9171             device_can_wakeup(&tp->pdev->dev))
9172                 wol->supported = WAKE_MAGIC;
9173         else
9174                 wol->supported = 0;
9175         wol->wolopts = 0;
9176         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9177             device_can_wakeup(&tp->pdev->dev))
9178                 wol->wolopts = WAKE_MAGIC;
9179         memset(&wol->sopass, 0, sizeof(wol->sopass));
9180 }
9181
9182 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9183 {
9184         struct tg3 *tp = netdev_priv(dev);
9185         struct device *dp = &tp->pdev->dev;
9186
9187         if (wol->wolopts & ~WAKE_MAGIC)
9188                 return -EINVAL;
9189         if ((wol->wolopts & WAKE_MAGIC) &&
9190             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9191                 return -EINVAL;
9192
9193         spin_lock_bh(&tp->lock);
9194         if (wol->wolopts & WAKE_MAGIC) {
9195                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9196                 device_set_wakeup_enable(dp, true);
9197         } else {
9198                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9199                 device_set_wakeup_enable(dp, false);
9200         }
9201         spin_unlock_bh(&tp->lock);
9202
9203         return 0;
9204 }
9205
9206 static u32 tg3_get_msglevel(struct net_device *dev)
9207 {
9208         struct tg3 *tp = netdev_priv(dev);
9209         return tp->msg_enable;
9210 }
9211
9212 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9213 {
9214         struct tg3 *tp = netdev_priv(dev);
9215         tp->msg_enable = value;
9216 }
9217
9218 static int tg3_set_tso(struct net_device *dev, u32 value)
9219 {
9220         struct tg3 *tp = netdev_priv(dev);
9221
9222         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9223                 if (value)
9224                         return -EINVAL;
9225                 return 0;
9226         }
9227         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9228             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9229                 if (value) {
9230                         dev->features |= NETIF_F_TSO6;
9231                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9232                             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9233                              GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9234                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9235                                 dev->features |= NETIF_F_TSO_ECN;
9236                 } else
9237                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9238         }
9239         return ethtool_op_set_tso(dev, value);
9240 }
9241
9242 static int tg3_nway_reset(struct net_device *dev)
9243 {
9244         struct tg3 *tp = netdev_priv(dev);
9245         int r;
9246
9247         if (!netif_running(dev))
9248                 return -EAGAIN;
9249
9250         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9251                 return -EINVAL;
9252
9253         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9254                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9255                         return -EAGAIN;
9256                 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
9257         } else {
9258                 u32 bmcr;
9259
9260                 spin_lock_bh(&tp->lock);
9261                 r = -EINVAL;
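                     /* The first BMCR read is discarded; only the second
                      * read below is checked.  This appears to be a
                      * deliberate dummy read rather than a typo.
                      */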
9262                 tg3_readphy(tp, MII_BMCR, &bmcr);
9263                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9264                     ((bmcr & BMCR_ANENABLE) ||
9265                      (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9266                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9267                                                    BMCR_ANENABLE);
9268                         r = 0;
9269                 }
9270                 spin_unlock_bh(&tp->lock);
9271         }
9272
9273         return r;
9274 }
9275
9276 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9277 {
9278         struct tg3 *tp = netdev_priv(dev);
9279
9280         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9281         ering->rx_mini_max_pending = 0;
9282         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9283                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9284         else
9285                 ering->rx_jumbo_max_pending = 0;
9286
9287         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9288
9289         ering->rx_pending = tp->rx_pending;
9290         ering->rx_mini_pending = 0;
9291         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9292                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9293         else
9294                 ering->rx_jumbo_pending = 0;
9295
9296         ering->tx_pending = tp->tx_pending;
9297 }
9298
9299 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9300 {
9301         struct tg3 *tp = netdev_priv(dev);
9302         int irq_sync = 0, err = 0;
9303
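             /* tx_pending must exceed MAX_SKB_FRAGS so a maximally
              * fragmented skb can always be queued; the 3x floor on
              * TSO_BUG chips presumably covers the extra descriptors the
              * TSO workaround path may consume per packet.
              */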
9304         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9305             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9306             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9307             (ering->tx_pending <= MAX_SKB_FRAGS) ||
9308             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9309              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9310                 return -EINVAL;
9311
9312         if (netif_running(dev)) {
9313                 tg3_phy_stop(tp);
9314                 tg3_netif_stop(tp);
9315                 irq_sync = 1;
9316         }
9317
9318         tg3_full_lock(tp, irq_sync);
9319
9320         tp->rx_pending = ering->rx_pending;
9321
9322         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9323             tp->rx_pending > 63)
9324                 tp->rx_pending = 63;
9325         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9326         tp->tx_pending = ering->tx_pending;
9327
9328         if (netif_running(dev)) {
9329                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9330                 err = tg3_restart_hw(tp, 1);
9331                 if (!err)
9332                         tg3_netif_start(tp);
9333         }
9334
9335         tg3_full_unlock(tp);
9336
9337         if (irq_sync && !err)
9338                 tg3_phy_start(tp);
9339
9340         return err;
9341 }
9342
9343 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9344 {
9345         struct tg3 *tp = netdev_priv(dev);
9346
9347         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9348
9349         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9350                 epause->rx_pause = 1;
9351         else
9352                 epause->rx_pause = 0;
9353
9354         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9355                 epause->tx_pause = 1;
9356         else
9357                 epause->tx_pause = 0;
9358 }
9359
9360 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9361 {
9362         struct tg3 *tp = netdev_priv(dev);
9363         int err = 0;
9364
9365         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9366                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9367                         return -EAGAIN;
9368
9369                 if (epause->autoneg) {
9370                         u32 newadv;
9371                         struct phy_device *phydev;
9372
9373                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9374
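                             /* Map the requested rx/tx pause settings onto
                              * the usual 802.3 advertisement encoding:
                              * both -> Pause, rx only -> Pause | Asym_Pause,
                              * tx only -> Asym_Pause, neither -> nothing.
                              */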
9375                         if (epause->rx_pause) {
9376                                 if (epause->tx_pause)
9377                                         newadv = ADVERTISED_Pause;
9378                                 else
9379                                         newadv = ADVERTISED_Pause |
9380                                                  ADVERTISED_Asym_Pause;
9381                         } else if (epause->tx_pause) {
9382                                 newadv = ADVERTISED_Asym_Pause;
9383                         } else
9384                                 newadv = 0;
9385
9386                         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9387                                 u32 oldadv = phydev->advertising &
9388                                              (ADVERTISED_Pause |
9389                                               ADVERTISED_Asym_Pause);
9390                                 if (oldadv != newadv) {
9391                                         phydev->advertising &=
9392                                                 ~(ADVERTISED_Pause |
9393                                                   ADVERTISED_Asym_Pause);
9394                                         phydev->advertising |= newadv;
9395                                         err = phy_start_aneg(phydev);
9396                                 }
9397                         } else {
9398                                 tp->link_config.advertising &=
9399                                                 ~(ADVERTISED_Pause |
9400                                                   ADVERTISED_Asym_Pause);
9401                                 tp->link_config.advertising |= newadv;
9402                         }
9403                 } else {
9404                         if (epause->rx_pause)
9405                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9406                         else
9407                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9408
9409                         if (epause->tx_pause)
9410                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9411                         else
9412                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9413
9414                         if (netif_running(dev))
9415                                 tg3_setup_flow_control(tp, 0, 0);
9416                 }
9417         } else {
9418                 int irq_sync = 0;
9419
9420                 if (netif_running(dev)) {
9421                         tg3_netif_stop(tp);
9422                         irq_sync = 1;
9423                 }
9424
9425                 tg3_full_lock(tp, irq_sync);
9426
9427                 if (epause->autoneg)
9428                         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9429                 else
9430                         tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9431                 if (epause->rx_pause)
9432                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9433                 else
9434                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9435                 if (epause->tx_pause)
9436                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9437                 else
9438                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9439
9440                 if (netif_running(dev)) {
9441                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9442                         err = tg3_restart_hw(tp, 1);
9443                         if (!err)
9444                                 tg3_netif_start(tp);
9445                 }
9446
9447                 tg3_full_unlock(tp);
9448         }
9449
9450         return err;
9451 }
9452
9453 static u32 tg3_get_rx_csum(struct net_device *dev)
9454 {
9455         struct tg3 *tp = netdev_priv(dev);
9456         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9457 }
9458
9459 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9460 {
9461         struct tg3 *tp = netdev_priv(dev);
9462
9463         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9464                 if (data != 0)
9465                         return -EINVAL;
9466                 return 0;
9467         }
9468
9469         spin_lock_bh(&tp->lock);
9470         if (data)
9471                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9472         else
9473                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9474         spin_unlock_bh(&tp->lock);
9475
9476         return 0;
9477 }
9478
9479 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9480 {
9481         struct tg3 *tp = netdev_priv(dev);
9482
9483         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9484                 if (data != 0)
9485                         return -EINVAL;
9486                 return 0;
9487         }
9488
9489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9490             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9491             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9492             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9493             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9494                 ethtool_op_set_tx_ipv6_csum(dev, data);
9495         else
9496                 ethtool_op_set_tx_csum(dev, data);
9497
9498         return 0;
9499 }
9500
9501 static int tg3_get_sset_count (struct net_device *dev, int sset)
9502 {
9503         switch (sset) {
9504         case ETH_SS_TEST:
9505                 return TG3_NUM_TEST;
9506         case ETH_SS_STATS:
9507                 return TG3_NUM_STATS;
9508         default:
9509                 return -EOPNOTSUPP;
9510         }
9511 }
9512
9513 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9514 {
9515         switch (stringset) {
9516         case ETH_SS_STATS:
9517                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9518                 break;
9519         case ETH_SS_TEST:
9520                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9521                 break;
9522         default:
9523                 WARN_ON(1);     /* we need a WARN() */
9524                 break;
9525         }
9526 }
9527
9528 static int tg3_phys_id(struct net_device *dev, u32 data)
9529 {
9530         struct tg3 *tp = netdev_priv(dev);
9531         int i;
9532
9533         if (!netif_running(tp->dev))
9534                 return -EAGAIN;
9535
9536         if (data == 0)
9537                 data = UINT_MAX / 2;
9538
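             /* data is the blink duration in seconds (0 means effectively
              * forever); each iteration toggles the LEDs and sleeps 500 ms,
              * so data * 2 iterations give roughly data seconds of blinking.
              */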
9539         for (i = 0; i < (data * 2); i++) {
9540                 if ((i % 2) == 0)
9541                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9542                                            LED_CTRL_1000MBPS_ON |
9543                                            LED_CTRL_100MBPS_ON |
9544                                            LED_CTRL_10MBPS_ON |
9545                                            LED_CTRL_TRAFFIC_OVERRIDE |
9546                                            LED_CTRL_TRAFFIC_BLINK |
9547                                            LED_CTRL_TRAFFIC_LED);
9548
9549                 else
9550                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9551                                            LED_CTRL_TRAFFIC_OVERRIDE);
9552
9553                 if (msleep_interruptible(500))
9554                         break;
9555         }
9556         tw32(MAC_LED_CTRL, tp->led_ctrl);
9557         return 0;
9558 }
9559
9560 static void tg3_get_ethtool_stats (struct net_device *dev,
9561                                    struct ethtool_stats *estats, u64 *tmp_stats)
9562 {
9563         struct tg3 *tp = netdev_priv(dev);
9564         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9565 }
9566
9567 #define NVRAM_TEST_SIZE 0x100
9568 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9569 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9570 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9571 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9572 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9573
9574 static int tg3_test_nvram(struct tg3 *tp)
9575 {
9576         u32 csum, magic;
9577         __le32 *buf;
9578         int i, j, k, err = 0, size;
9579
9580         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9581                 return -EIO;
9582
9583         if (magic == TG3_EEPROM_MAGIC)
9584                 size = NVRAM_TEST_SIZE;
9585         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9586                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9587                     TG3_EEPROM_SB_FORMAT_1) {
9588                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9589                         case TG3_EEPROM_SB_REVISION_0:
9590                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9591                                 break;
9592                         case TG3_EEPROM_SB_REVISION_2:
9593                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9594                                 break;
9595                         case TG3_EEPROM_SB_REVISION_3:
9596                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9597                                 break;
9598                         default:
9599                                 return 0;
9600                         }
9601                 } else
9602                         return 0;
9603         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9604                 size = NVRAM_SELFBOOT_HW_SIZE;
9605         else
9606                 return -EIO;
9607
9608         buf = kmalloc(size, GFP_KERNEL);
9609         if (buf == NULL)
9610                 return -ENOMEM;
9611
9612         err = -EIO;
9613         for (i = 0, j = 0; i < size; i += 4, j++) {
9614                 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9615                         break;
9616         }
9617         if (i < size)
9618                 goto out;
9619
9620         /* Selfboot format */
9621         magic = swab32(le32_to_cpu(buf[0]));
9622         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9623             TG3_EEPROM_MAGIC_FW) {
9624                 u8 *buf8 = (u8 *) buf, csum8 = 0;
9625
9626                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9627                     TG3_EEPROM_SB_REVISION_2) {
9628                         /* For rev 2, the csum doesn't include the MBA. */
9629                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9630                                 csum8 += buf8[i];
9631                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9632                                 csum8 += buf8[i];
9633                 } else {
9634                         for (i = 0; i < size; i++)
9635                                 csum8 += buf8[i];
9636                 }
9637
9638                 if (csum8 == 0) {
9639                         err = 0;
9640                         goto out;
9641                 }
9642
9643                 err = -EIO;
9644                 goto out;
9645         }
9646
9647         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9648             TG3_EEPROM_MAGIC_HW) {
9649                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9650                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9651                 u8 *buf8 = (u8 *) buf;
9652
9653                 /* Separate the parity bits and the data bytes.  */
9654                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9655                         if ((i == 0) || (i == 8)) {
9656                                 int l;
9657                                 u8 msk;
9658
9659                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9660                                         parity[k++] = buf8[i] & msk;
9661                                 i++;
9662                         }
9663                         else if (i == 16) {
9664                                 int l;
9665                                 u8 msk;
9666
9667                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9668                                         parity[k++] = buf8[i] & msk;
9669                                 i++;
9670
9671                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9672                                         parity[k++] = buf8[i] & msk;
9673                                 i++;
9674                         }
9675                         data[j++] = buf8[i];
9676                 }
9677
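                     /* The loop below enforces odd parity per byte: an
                      * even-weight data byte must have its parity bit set,
                      * an odd-weight byte must have it clear.
                      */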
9678                 err = -EIO;
9679                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9680                         u8 hw8 = hweight8(data[i]);
9681
9682                         if ((hw8 & 0x1) && parity[i])
9683                                 goto out;
9684                         else if (!(hw8 & 0x1) && !parity[i])
9685                                 goto out;
9686                 }
9687                 err = 0;
9688                 goto out;
9689         }
9690
9691         /* Bootstrap checksum at offset 0x10 */
9692         csum = calc_crc((unsigned char *) buf, 0x10);
9693         if (csum != le32_to_cpu(buf[0x10/4]))
9694                 goto out;
9695
9696         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9697         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9698         if (csum != le32_to_cpu(buf[0xfc/4]))
9699                 goto out;
9700
9701         err = 0;
9702
9703 out:
9704         kfree(buf);
9705         return err;
9706 }
9707
9708 #define TG3_SERDES_TIMEOUT_SEC  2
9709 #define TG3_COPPER_TIMEOUT_SEC  6
9710
9711 static int tg3_test_link(struct tg3 *tp)
9712 {
9713         int i, max;
9714
9715         if (!netif_running(tp->dev))
9716                 return -ENODEV;
9717
9718         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9719                 max = TG3_SERDES_TIMEOUT_SEC;
9720         else
9721                 max = TG3_COPPER_TIMEOUT_SEC;
9722
9723         for (i = 0; i < max; i++) {
9724                 if (netif_carrier_ok(tp->dev))
9725                         return 0;
9726
9727                 if (msleep_interruptible(1000))
9728                         break;
9729         }
9730
9731         return -EIO;
9732 }
9733
9734 /* Only test the commonly used registers */
9735 static int tg3_test_registers(struct tg3 *tp)
9736 {
9737         int i, is_5705, is_5750;
9738         u32 offset, read_mask, write_mask, val, save_val, read_val;
9739         static struct {
9740                 u16 offset;
9741                 u16 flags;
9742 #define TG3_FL_5705     0x1
9743 #define TG3_FL_NOT_5705 0x2
9744 #define TG3_FL_NOT_5788 0x4
9745 #define TG3_FL_NOT_5750 0x8
9746                 u32 read_mask;
9747                 u32 write_mask;
9748         } reg_tbl[] = {
9749                 /* MAC Control Registers */
9750                 { MAC_MODE, TG3_FL_NOT_5705,
9751                         0x00000000, 0x00ef6f8c },
9752                 { MAC_MODE, TG3_FL_5705,
9753                         0x00000000, 0x01ef6b8c },
9754                 { MAC_STATUS, TG3_FL_NOT_5705,
9755                         0x03800107, 0x00000000 },
9756                 { MAC_STATUS, TG3_FL_5705,
9757                         0x03800100, 0x00000000 },
9758                 { MAC_ADDR_0_HIGH, 0x0000,
9759                         0x00000000, 0x0000ffff },
9760                 { MAC_ADDR_0_LOW, 0x0000,
9761                         0x00000000, 0xffffffff },
9762                 { MAC_RX_MTU_SIZE, 0x0000,
9763                         0x00000000, 0x0000ffff },
9764                 { MAC_TX_MODE, 0x0000,
9765                         0x00000000, 0x00000070 },
9766                 { MAC_TX_LENGTHS, 0x0000,
9767                         0x00000000, 0x00003fff },
9768                 { MAC_RX_MODE, TG3_FL_NOT_5705,
9769                         0x00000000, 0x000007fc },
9770                 { MAC_RX_MODE, TG3_FL_5705,
9771                         0x00000000, 0x000007dc },
9772                 { MAC_HASH_REG_0, 0x0000,
9773                         0x00000000, 0xffffffff },
9774                 { MAC_HASH_REG_1, 0x0000,
9775                         0x00000000, 0xffffffff },
9776                 { MAC_HASH_REG_2, 0x0000,
9777                         0x00000000, 0xffffffff },
9778                 { MAC_HASH_REG_3, 0x0000,
9779                         0x00000000, 0xffffffff },
9780
9781                 /* Receive Data and Receive BD Initiator Control Registers. */
9782                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9783                         0x00000000, 0xffffffff },
9784                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9785                         0x00000000, 0xffffffff },
9786                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9787                         0x00000000, 0x00000003 },
9788                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9789                         0x00000000, 0xffffffff },
9790                 { RCVDBDI_STD_BD+0, 0x0000,
9791                         0x00000000, 0xffffffff },
9792                 { RCVDBDI_STD_BD+4, 0x0000,
9793                         0x00000000, 0xffffffff },
9794                 { RCVDBDI_STD_BD+8, 0x0000,
9795                         0x00000000, 0xffff0002 },
9796                 { RCVDBDI_STD_BD+0xc, 0x0000,
9797                         0x00000000, 0xffffffff },
9798
9799                 /* Receive BD Initiator Control Registers. */
9800                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9801                         0x00000000, 0xffffffff },
9802                 { RCVBDI_STD_THRESH, TG3_FL_5705,
9803                         0x00000000, 0x000003ff },
9804                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9805                         0x00000000, 0xffffffff },
9806
9807                 /* Host Coalescing Control Registers. */
9808                 { HOSTCC_MODE, TG3_FL_NOT_5705,
9809                         0x00000000, 0x00000004 },
9810                 { HOSTCC_MODE, TG3_FL_5705,
9811                         0x00000000, 0x000000f6 },
9812                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9813                         0x00000000, 0xffffffff },
9814                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9815                         0x00000000, 0x000003ff },
9816                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9817                         0x00000000, 0xffffffff },
9818                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9819                         0x00000000, 0x000003ff },
9820                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9821                         0x00000000, 0xffffffff },
9822                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9823                         0x00000000, 0x000000ff },
9824                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9825                         0x00000000, 0xffffffff },
9826                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9827                         0x00000000, 0x000000ff },
9828                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9829                         0x00000000, 0xffffffff },
9830                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9831                         0x00000000, 0xffffffff },
9832                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9833                         0x00000000, 0xffffffff },
9834                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9835                         0x00000000, 0x000000ff },
9836                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9837                         0x00000000, 0xffffffff },
9838                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9839                         0x00000000, 0x000000ff },
9840                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9841                         0x00000000, 0xffffffff },
9842                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9843                         0x00000000, 0xffffffff },
9844                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9845                         0x00000000, 0xffffffff },
9846                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9847                         0x00000000, 0xffffffff },
9848                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9849                         0x00000000, 0xffffffff },
9850                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9851                         0xffffffff, 0x00000000 },
9852                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9853                         0xffffffff, 0x00000000 },
9854
9855                 /* Buffer Manager Control Registers. */
9856                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9857                         0x00000000, 0x007fff80 },
9858                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9859                         0x00000000, 0x007fffff },
9860                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9861                         0x00000000, 0x0000003f },
9862                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9863                         0x00000000, 0x000001ff },
9864                 { BUFMGR_MB_HIGH_WATER, 0x0000,
9865                         0x00000000, 0x000001ff },
9866                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9867                         0xffffffff, 0x00000000 },
9868                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9869                         0xffffffff, 0x00000000 },
9870
9871                 /* Mailbox Registers */
9872                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9873                         0x00000000, 0x000001ff },
9874                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9875                         0x00000000, 0x000001ff },
9876                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9877                         0x00000000, 0x000007ff },
9878                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9879                         0x00000000, 0x000001ff },
9880
9881                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9882         };
9883
9884         is_5705 = is_5750 = 0;
9885         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9886                 is_5705 = 1;
9887                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9888                         is_5750 = 1;
9889         }
9890
9891         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9892                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9893                         continue;
9894
9895                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9896                         continue;
9897
9898                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9899                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
9900                         continue;
9901
9902                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9903                         continue;
9904
9905                 offset = (u32) reg_tbl[i].offset;
9906                 read_mask = reg_tbl[i].read_mask;
9907                 write_mask = reg_tbl[i].write_mask;
9908
9909                 /* Save the original register content */
9910                 save_val = tr32(offset);
9911
9912                 /* Determine the read-only value. */
9913                 read_val = save_val & read_mask;
9914
9915                 /* Write zero to the register, then make sure the read-only bits
9916                  * are not changed and the read/write bits are all zeros.
9917                  */
9918                 tw32(offset, 0);
9919
9920                 val = tr32(offset);
9921
9922                 /* Test the read-only and read/write bits. */
9923                 if (((val & read_mask) != read_val) || (val & write_mask))
9924                         goto out;
9925
9926                 /* Write ones to all the bits defined by RdMask and WrMask, then
9927                  * make sure the read-only bits are not changed and the
9928                  * read/write bits are all ones.
9929                  */
9930                 tw32(offset, read_mask | write_mask);
9931
9932                 val = tr32(offset);
9933
9934                 /* Test the read-only bits. */
9935                 if ((val & read_mask) != read_val)
9936                         goto out;
9937
9938                 /* Test the read/write bits. */
9939                 if ((val & write_mask) != write_mask)
9940                         goto out;
9941
9942                 tw32(offset, save_val);
9943         }
9944
9945         return 0;
9946
9947 out:
9948         if (netif_msg_hw(tp))
9949                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9950                        offset);
9951         tw32(offset, save_val);
9952         return -EIO;
9953 }
9954
9955 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9956 {
9957         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9958         int i;
9959         u32 j;
9960
9961         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9962                 for (j = 0; j < len; j += 4) {
9963                         u32 val;
9964
9965                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9966                         tg3_read_mem(tp, offset + j, &val);
9967                         if (val != test_pattern[i])
9968                                 return -EIO;
9969                 }
9970         }
9971         return 0;
9972 }
9973
9974 static int tg3_test_memory(struct tg3 *tp)
9975 {
9976         static struct mem_entry {
9977                 u32 offset;
9978                 u32 len;
9979         } mem_tbl_570x[] = {
9980                 { 0x00000000, 0x00b50},
9981                 { 0x00002000, 0x1c000},
9982                 { 0xffffffff, 0x00000}
9983         }, mem_tbl_5705[] = {
9984                 { 0x00000100, 0x0000c},
9985                 { 0x00000200, 0x00008},
9986                 { 0x00004000, 0x00800},
9987                 { 0x00006000, 0x01000},
9988                 { 0x00008000, 0x02000},
9989                 { 0x00010000, 0x0e000},
9990                 { 0xffffffff, 0x00000}
9991         }, mem_tbl_5755[] = {
9992                 { 0x00000200, 0x00008},
9993                 { 0x00004000, 0x00800},
9994                 { 0x00006000, 0x00800},
9995                 { 0x00008000, 0x02000},
9996                 { 0x00010000, 0x0c000},
9997                 { 0xffffffff, 0x00000}
9998         }, mem_tbl_5906[] = {
9999                 { 0x00000200, 0x00008},
10000                 { 0x00004000, 0x00400},
10001                 { 0x00006000, 0x00400},
10002                 { 0x00008000, 0x01000},
10003                 { 0x00010000, 0x01000},
10004                 { 0xffffffff, 0x00000}
10005         };
10006         struct mem_entry *mem_tbl;
10007         int err = 0;
10008         int i;
10009
10010         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10011                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10012                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10013                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10014                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10015                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10016                         mem_tbl = mem_tbl_5755;
10017                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10018                         mem_tbl = mem_tbl_5906;
10019                 else
10020                         mem_tbl = mem_tbl_5705;
10021         } else
10022                 mem_tbl = mem_tbl_570x;
10023
10024         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10025                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10026                     mem_tbl[i].len)) != 0)
10027                         break;
10028         }
10029
10030         return err;
10031 }
10032
10033 #define TG3_MAC_LOOPBACK        0
10034 #define TG3_PHY_LOOPBACK        1
10035
10036 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10037 {
10038         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10039         u32 desc_idx;
10040         struct sk_buff *skb, *rx_skb;
10041         u8 *tx_data;
10042         dma_addr_t map;
10043         int num_pkts, tx_len, rx_len, i, err;
10044         struct tg3_rx_buffer_desc *desc;
10045
10046         if (loopback_mode == TG3_MAC_LOOPBACK) {
10047                 /* HW errata - MAC loopback fails in some cases on 5780.
10048                  * Normal traffic and PHY loopback are not affected by
10049                  * the errata.
10050                  */
10051                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10052                         return 0;
10053
10054                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10055                            MAC_MODE_PORT_INT_LPBACK;
10056                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10057                         mac_mode |= MAC_MODE_LINK_POLARITY;
10058                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10059                         mac_mode |= MAC_MODE_PORT_MODE_MII;
10060                 else
10061                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
10062                 tw32(MAC_MODE, mac_mode);
10063         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10064                 u32 val;
10065
10066                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10067                         u32 phytest;
10068
10069                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
10070                                 u32 phy;
10071
10072                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
10073                                              phytest | MII_TG3_EPHY_SHADOW_EN);
10074                                 if (!tg3_readphy(tp, 0x1b, &phy))
10075                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
10076                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
10077                         }
10078                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10079                 } else
10080                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10081
10082                 tg3_phy_toggle_automdix(tp, 0);
10083
10084                 tg3_writephy(tp, MII_BMCR, val);
10085                 udelay(40);
10086
10087                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10088                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10089                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
10090                         mac_mode |= MAC_MODE_PORT_MODE_MII;
10091                 } else
10092                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
10093
10094                 /* reset to prevent losing 1st rx packet intermittently */
10095                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10096                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10097                         udelay(10);
10098                         tw32_f(MAC_RX_MODE, tp->rx_mode);
10099                 }
10100                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10101                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10102                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10103                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10104                                 mac_mode |= MAC_MODE_LINK_POLARITY;
10105                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
10106                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10107                 }
10108                 tw32(MAC_MODE, mac_mode);
10109         }
10110         else
10111                 return -EINVAL;
10112
10113         err = -EIO;
10114
10115         tx_len = 1514;
10116         skb = netdev_alloc_skb(tp->dev, tx_len);
10117         if (!skb)
10118                 return -ENOMEM;
10119
10120         tx_data = skb_put(skb, tx_len);
10121         memcpy(tx_data, tp->dev->dev_addr, 6);
10122         memset(tx_data + 6, 0x0, 8);
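              /* Build a self-addressed test frame: bytes 0-5 are our own MAC
               * (destination), bytes 6-13 are zeroed (source + type), and
               * bytes 14 onward carry the incrementing pattern verified on
               * the receive side below.
               */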
10123
10124         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10125
10126         for (i = 14; i < tx_len; i++)
10127                 tx_data[i] = (u8) (i & 0xff);
10128
10129         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10130
10131         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10132              HOSTCC_MODE_NOW);
10133
10134         udelay(10);
10135
10136         rx_start_idx = tp->hw_status->idx[0].rx_producer;
10137
10138         num_pkts = 0;
10139
10140         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
10141
10142         tp->tx_prod++;
10143         num_pkts++;
10144
10145         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10146                      tp->tx_prod);
10147         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
10148
10149         udelay(10);
10150
10151         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
10152         for (i = 0; i < 25; i++) {
10153                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10154                        HOSTCC_MODE_NOW);
10155
10156                 udelay(10);
10157
10158                 tx_idx = tp->hw_status->idx[0].tx_consumer;
10159                 rx_idx = tp->hw_status->idx[0].rx_producer;
10160                 if ((tx_idx == tp->tx_prod) &&
10161                     (rx_idx == (rx_start_idx + num_pkts)))
10162                         break;
10163         }
10164
10165         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10166         dev_kfree_skb(skb);
10167
10168         if (tx_idx != tp->tx_prod)
10169                 goto out;
10170
10171         if (rx_idx != rx_start_idx + num_pkts)
10172                 goto out;
10173
10174         desc = &tp->rx_rcb[rx_start_idx];
10175         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10176         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10177         if (opaque_key != RXD_OPAQUE_RING_STD)
10178                 goto out;
10179
10180         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10181             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10182                 goto out;
10183
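              /* The BD length includes 4 trailing bytes (the RX MTU was
               * programmed as tx_len + 4 above, presumably to cover the
               * FCS), so strip them before comparing against tx_len.
               */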
10184         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10185         if (rx_len != tx_len)
10186                 goto out;
10187
10188         rx_skb = tp->rx_std_buffers[desc_idx].skb;
10189
10190         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10191         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10192
10193         for (i = 14; i < tx_len; i++) {
10194                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10195                         goto out;
10196         }
10197         err = 0;
10198
10199         /* tg3_free_rings will unmap and free the rx_skb */
10200 out:
10201         return err;
10202 }
10203
10204 #define TG3_MAC_LOOPBACK_FAILED         1
10205 #define TG3_PHY_LOOPBACK_FAILED         2
10206 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
10207                                          TG3_PHY_LOOPBACK_FAILED)
10208
10209 static int tg3_test_loopback(struct tg3 *tp)
10210 {
10211         int err = 0;
10212         u32 cpmuctrl = 0;
10213
10214         if (!netif_running(tp->dev))
10215                 return TG3_LOOPBACK_FAILED;
10216
10217         err = tg3_reset_hw(tp, 1);
10218         if (err)
10219                 return TG3_LOOPBACK_FAILED;
10220
10221         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10222             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10223             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10224                 int i;
10225                 u32 status;
10226
10227                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10228
10229                 /* Wait for up to 40 microseconds to acquire lock. */
10230                 for (i = 0; i < 4; i++) {
10231                         status = tr32(TG3_CPMU_MUTEX_GNT);
10232                         if (status == CPMU_MUTEX_GNT_DRIVER)
10233                                 break;
10234                         udelay(10);
10235                 }
10236
10237                 if (status != CPMU_MUTEX_GNT_DRIVER)
10238                         return TG3_LOOPBACK_FAILED;
10239
10240                 /* Turn off link-based power management. */
10241                 cpmuctrl = tr32(TG3_CPMU_CTRL);
10242                 tw32(TG3_CPMU_CTRL,
10243                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10244                                   CPMU_CTRL_LINK_AWARE_MODE));
10245         }
10246
10247         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10248                 err |= TG3_MAC_LOOPBACK_FAILED;
10249
10250         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10251             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10252             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10253                 tw32(TG3_CPMU_CTRL, cpmuctrl);
10254
10255                 /* Release the mutex */
10256                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10257         }
10258
10259         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10260             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10261                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10262                         err |= TG3_PHY_LOOPBACK_FAILED;
10263         }
10264
10265         return err;
10266 }
10267
10268 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10269                           u64 *data)
10270 {
10271         struct tg3 *tp = netdev_priv(dev);
10272
10273         if (tp->link_config.phy_is_low_power)
10274                 tg3_set_power_state(tp, PCI_D0);
10275
10276         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10277
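              /* Test results: data[0] NVRAM, data[1] link, data[2] registers,
               * data[3] memory, data[4] loopback (MAC/PHY bitmask),
               * data[5] interrupt.
               */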
10278         if (tg3_test_nvram(tp) != 0) {
10279                 etest->flags |= ETH_TEST_FL_FAILED;
10280                 data[0] = 1;
10281         }
10282         if (tg3_test_link(tp) != 0) {
10283                 etest->flags |= ETH_TEST_FL_FAILED;
10284                 data[1] = 1;
10285         }
10286         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10287                 int err, err2 = 0, irq_sync = 0;
10288
10289                 if (netif_running(dev)) {
10290                         tg3_phy_stop(tp);
10291                         tg3_netif_stop(tp);
10292                         irq_sync = 1;
10293                 }
10294
10295                 tg3_full_lock(tp, irq_sync);
10296
10297                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10298                 err = tg3_nvram_lock(tp);
10299                 tg3_halt_cpu(tp, RX_CPU_BASE);
10300                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10301                         tg3_halt_cpu(tp, TX_CPU_BASE);
10302                 if (!err)
10303                         tg3_nvram_unlock(tp);
10304
10305                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10306                         tg3_phy_reset(tp);
10307
10308                 if (tg3_test_registers(tp) != 0) {
10309                         etest->flags |= ETH_TEST_FL_FAILED;
10310                         data[2] = 1;
10311                 }
10312                 if (tg3_test_memory(tp) != 0) {
10313                         etest->flags |= ETH_TEST_FL_FAILED;
10314                         data[3] = 1;
10315                 }
10316                 if ((data[4] = tg3_test_loopback(tp)) != 0)
10317                         etest->flags |= ETH_TEST_FL_FAILED;
10318
10319                 tg3_full_unlock(tp);
10320
10321                 if (tg3_test_interrupt(tp) != 0) {
10322                         etest->flags |= ETH_TEST_FL_FAILED;
10323                         data[5] = 1;
10324                 }
10325
10326                 tg3_full_lock(tp, 0);
10327
10328                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10329                 if (netif_running(dev)) {
10330                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10331                         err2 = tg3_restart_hw(tp, 1);
10332                         if (!err2)
10333                                 tg3_netif_start(tp);
10334                 }
10335
10336                 tg3_full_unlock(tp);
10337
10338                 if (irq_sync && !err2)
10339                         tg3_phy_start(tp);
10340         }
10341         if (tp->link_config.phy_is_low_power)
10342                 tg3_set_power_state(tp, PCI_D3hot);
10343
10344 }
10345
10346 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10347 {
10348         struct mii_ioctl_data *data = if_mii(ifr);
10349         struct tg3 *tp = netdev_priv(dev);
10350         int err;
10351
10352         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10353                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10354                         return -EAGAIN;
10355                 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10356         }
10357
10358         switch (cmd) {
10359         case SIOCGMIIPHY:
10360                 data->phy_id = PHY_ADDR;
10361
10362                 /* fallthru */
10363         case SIOCGMIIREG: {
10364                 u32 mii_regval;
10365
10366                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10367                         break;                  /* We have no PHY */
10368
10369                 if (tp->link_config.phy_is_low_power)
10370                         return -EAGAIN;
10371
10372                 spin_lock_bh(&tp->lock);
10373                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10374                 spin_unlock_bh(&tp->lock);
10375
10376                 data->val_out = mii_regval;
10377
10378                 return err;
10379         }
10380
10381         case SIOCSMIIREG:
10382                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10383                         break;                  /* We have no PHY */
10384
10385                 if (!capable(CAP_NET_ADMIN))
10386                         return -EPERM;
10387
10388                 if (tp->link_config.phy_is_low_power)
10389                         return -EAGAIN;
10390
10391                 spin_lock_bh(&tp->lock);
10392                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10393                 spin_unlock_bh(&tp->lock);
10394
10395                 return err;
10396
10397         default:
10398                 /* do nothing */
10399                 break;
10400         }
10401         return -EOPNOTSUPP;
10402 }
10403
10404 #if TG3_VLAN_TAG_USED
10405 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10406 {
10407         struct tg3 *tp = netdev_priv(dev);
10408
10409         if (netif_running(dev))
10410                 tg3_netif_stop(tp);
10411
10412         tg3_full_lock(tp, 0);
10413
10414         tp->vlgrp = grp;
10415
10416         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10417         __tg3_set_rx_mode(dev);
10418
10419         if (netif_running(dev))
10420                 tg3_netif_start(tp);
10421
10422         tg3_full_unlock(tp);
10423 }
10424 #endif
10425
10426 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10427 {
10428         struct tg3 *tp = netdev_priv(dev);
10429
10430         memcpy(ec, &tp->coal, sizeof(*ec));
10431         return 0;
10432 }
10433
10434 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10435 {
10436         struct tg3 *tp = netdev_priv(dev);
10437         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10438         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10439
10440         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10441                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10442                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10443                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10444                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10445         }
10446
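        /* On 5705 and newer chips the per-irq tick and statistics block
         * limits above stay at zero, so the check below only accepts
         * zero for those ethtool parameters on such hardware.
         */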
10447         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10448             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10449             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10450             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10451             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10452             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10453             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10454             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10455             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10456             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10457                 return -EINVAL;
10458
10459         /* No rx interrupts will be generated if both are zero */
10460         if ((ec->rx_coalesce_usecs == 0) &&
10461             (ec->rx_max_coalesced_frames == 0))
10462                 return -EINVAL;
10463
10464         /* No tx interrupts will be generated if both are zero */
10465         if ((ec->tx_coalesce_usecs == 0) &&
10466             (ec->tx_max_coalesced_frames == 0))
10467                 return -EINVAL;
10468
10469         /* Only copy relevant parameters, ignore all others. */
10470         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10471         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10472         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10473         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10474         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10475         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10476         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10477         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10478         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10479
10480         if (netif_running(dev)) {
10481                 tg3_full_lock(tp, 0);
10482                 __tg3_set_coalesce(tp, &tp->coal);
10483                 tg3_full_unlock(tp);
10484         }
10485         return 0;
10486 }
10487
10488 static const struct ethtool_ops tg3_ethtool_ops = {
10489         .get_settings           = tg3_get_settings,
10490         .set_settings           = tg3_set_settings,
10491         .get_drvinfo            = tg3_get_drvinfo,
10492         .get_regs_len           = tg3_get_regs_len,
10493         .get_regs               = tg3_get_regs,
10494         .get_wol                = tg3_get_wol,
10495         .set_wol                = tg3_set_wol,
10496         .get_msglevel           = tg3_get_msglevel,
10497         .set_msglevel           = tg3_set_msglevel,
10498         .nway_reset             = tg3_nway_reset,
10499         .get_link               = ethtool_op_get_link,
10500         .get_eeprom_len         = tg3_get_eeprom_len,
10501         .get_eeprom             = tg3_get_eeprom,
10502         .set_eeprom             = tg3_set_eeprom,
10503         .get_ringparam          = tg3_get_ringparam,
10504         .set_ringparam          = tg3_set_ringparam,
10505         .get_pauseparam         = tg3_get_pauseparam,
10506         .set_pauseparam         = tg3_set_pauseparam,
10507         .get_rx_csum            = tg3_get_rx_csum,
10508         .set_rx_csum            = tg3_set_rx_csum,
10509         .set_tx_csum            = tg3_set_tx_csum,
10510         .set_sg                 = ethtool_op_set_sg,
10511         .set_tso                = tg3_set_tso,
10512         .self_test              = tg3_self_test,
10513         .get_strings            = tg3_get_strings,
10514         .phys_id                = tg3_phys_id,
10515         .get_ethtool_stats      = tg3_get_ethtool_stats,
10516         .get_coalesce           = tg3_get_coalesce,
10517         .set_coalesce           = tg3_set_coalesce,
10518         .get_sset_count         = tg3_get_sset_count,
10519 };
10520
10521 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10522 {
10523         u32 cursize, val, magic;
10524
10525         tp->nvram_size = EEPROM_CHIP_SIZE;
10526
10527         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10528                 return;
10529
10530         if ((magic != TG3_EEPROM_MAGIC) &&
10531             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10532             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10533                 return;
10534
10535         /*
10536          * Size the chip by reading offsets at increasing powers of two.
10537          * When we encounter our validation signature, we know the addressing
10538          * has wrapped around, and thus have our chip size.
10539          */
10540         cursize = 0x10;
10541
10542         while (cursize < tp->nvram_size) {
10543                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10544                         return;
10545
10546                 if (val == magic)
10547                         break;
10548
10549                 cursize <<= 1;
10550         }
10551
10552         tp->nvram_size = cursize;
10553 }
10554
10555 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10556 {
10557         u32 val;
10558
10559         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10560                 return;
10561
10562         /* Selfboot format */
10563         if (val != TG3_EEPROM_MAGIC) {
10564                 tg3_get_eeprom_size(tp);
10565                 return;
10566         }
10567
10568         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10569                 if (val != 0) {
10570                         tp->nvram_size = (val >> 16) * 1024;
10571                         return;
10572                 }
10573         }
10574         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10575 }
10576
10577 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10578 {
10579         u32 nvcfg1;
10580
10581         nvcfg1 = tr32(NVRAM_CFG1);
10582         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10583                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10584         }
10585         else {
10586                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10587                 tw32(NVRAM_CFG1, nvcfg1);
10588         }
10589
10590         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10591             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10592                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10593                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10594                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10595                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10596                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10597                                 break;
10598                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10599                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10600                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10601                                 break;
10602                         case FLASH_VENDOR_ATMEL_EEPROM:
10603                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10604                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10605                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10606                                 break;
10607                         case FLASH_VENDOR_ST:
10608                                 tp->nvram_jedecnum = JEDEC_ST;
10609                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10610                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10611                                 break;
10612                         case FLASH_VENDOR_SAIFUN:
10613                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10614                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10615                                 break;
10616                         case FLASH_VENDOR_SST_SMALL:
10617                         case FLASH_VENDOR_SST_LARGE:
10618                                 tp->nvram_jedecnum = JEDEC_SST;
10619                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10620                                 break;
10621                 }
10622         }
10623         else {
10624                 tp->nvram_jedecnum = JEDEC_ATMEL;
10625                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10626                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10627         }
10628 }
10629
10630 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10631 {
10632         u32 nvcfg1;
10633
10634         nvcfg1 = tr32(NVRAM_CFG1);
10635
10636         /* NVRAM protection for TPM */
10637         if (nvcfg1 & (1 << 27))
10638                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10639
10640         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10641                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10642                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10643                         tp->nvram_jedecnum = JEDEC_ATMEL;
10644                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10645                         break;
10646                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10647                         tp->nvram_jedecnum = JEDEC_ATMEL;
10648                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10649                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10650                         break;
10651                 case FLASH_5752VENDOR_ST_M45PE10:
10652                 case FLASH_5752VENDOR_ST_M45PE20:
10653                 case FLASH_5752VENDOR_ST_M45PE40:
10654                         tp->nvram_jedecnum = JEDEC_ST;
10655                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10656                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10657                         break;
10658         }
10659
10660         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10661                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10662                         case FLASH_5752PAGE_SIZE_256:
10663                                 tp->nvram_pagesize = 256;
10664                                 break;
10665                         case FLASH_5752PAGE_SIZE_512:
10666                                 tp->nvram_pagesize = 512;
10667                                 break;
10668                         case FLASH_5752PAGE_SIZE_1K:
10669                                 tp->nvram_pagesize = 1024;
10670                                 break;
10671                         case FLASH_5752PAGE_SIZE_2K:
10672                                 tp->nvram_pagesize = 2048;
10673                                 break;
10674                         case FLASH_5752PAGE_SIZE_4K:
10675                                 tp->nvram_pagesize = 4096;
10676                                 break;
10677                         case FLASH_5752PAGE_SIZE_264:
10678                                 tp->nvram_pagesize = 264;
10679                                 break;
10680                 }
10681         }
10682         else {
10683                 /* For eeprom, set pagesize to maximum eeprom size */
10684                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10685
10686                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10687                 tw32(NVRAM_CFG1, nvcfg1);
10688         }
10689 }
10690
10691 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10692 {
10693         u32 nvcfg1, protect = 0;
10694
10695         nvcfg1 = tr32(NVRAM_CFG1);
10696
10697         /* NVRAM protection for TPM */
10698         if (nvcfg1 & (1 << 27)) {
10699                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10700                 protect = 1;
10701         }
10702
10703         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10704         switch (nvcfg1) {
10705                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10706                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10707                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10708                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10709                         tp->nvram_jedecnum = JEDEC_ATMEL;
10710                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10711                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10712                         tp->nvram_pagesize = 264;
10713                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10714                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10715                                 tp->nvram_size = (protect ? 0x3e200 :
10716                                                   TG3_NVRAM_SIZE_512KB);
10717                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10718                                 tp->nvram_size = (protect ? 0x1f200 :
10719                                                   TG3_NVRAM_SIZE_256KB);
10720                         else
10721                                 tp->nvram_size = (protect ? 0x1f200 :
10722                                                   TG3_NVRAM_SIZE_128KB);
10723                         break;
10724                 case FLASH_5752VENDOR_ST_M45PE10:
10725                 case FLASH_5752VENDOR_ST_M45PE20:
10726                 case FLASH_5752VENDOR_ST_M45PE40:
10727                         tp->nvram_jedecnum = JEDEC_ST;
10728                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10729                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10730                         tp->nvram_pagesize = 256;
10731                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10732                                 tp->nvram_size = (protect ?
10733                                                   TG3_NVRAM_SIZE_64KB :
10734                                                   TG3_NVRAM_SIZE_128KB);
10735                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10736                                 tp->nvram_size = (protect ?
10737                                                   TG3_NVRAM_SIZE_64KB :
10738                                                   TG3_NVRAM_SIZE_256KB);
10739                         else
10740                                 tp->nvram_size = (protect ?
10741                                                   TG3_NVRAM_SIZE_128KB :
10742                                                   TG3_NVRAM_SIZE_512KB);
10743                         break;
10744         }
10745 }
10746
10747 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10748 {
10749         u32 nvcfg1;
10750
10751         nvcfg1 = tr32(NVRAM_CFG1);
10752
10753         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10754                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10755                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10756                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10757                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10758                         tp->nvram_jedecnum = JEDEC_ATMEL;
10759                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10760                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10761
10762                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10763                         tw32(NVRAM_CFG1, nvcfg1);
10764                         break;
10765                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10766                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10767                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10768                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10769                         tp->nvram_jedecnum = JEDEC_ATMEL;
10770                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10771                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10772                         tp->nvram_pagesize = 264;
10773                         break;
10774                 case FLASH_5752VENDOR_ST_M45PE10:
10775                 case FLASH_5752VENDOR_ST_M45PE20:
10776                 case FLASH_5752VENDOR_ST_M45PE40:
10777                         tp->nvram_jedecnum = JEDEC_ST;
10778                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10779                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10780                         tp->nvram_pagesize = 256;
10781                         break;
10782         }
10783 }
10784
10785 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10786 {
10787         u32 nvcfg1, protect = 0;
10788
10789         nvcfg1 = tr32(NVRAM_CFG1);
10790
10791         /* NVRAM protection for TPM */
10792         if (nvcfg1 & (1 << 27)) {
10793                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10794                 protect = 1;
10795         }
10796
10797         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10798         switch (nvcfg1) {
10799                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10800                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10801                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10802                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10803                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10804                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10805                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10806                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10807                         tp->nvram_jedecnum = JEDEC_ATMEL;
10808                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10809                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10810                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10811                         tp->nvram_pagesize = 256;
10812                         break;
10813                 case FLASH_5761VENDOR_ST_A_M45PE20:
10814                 case FLASH_5761VENDOR_ST_A_M45PE40:
10815                 case FLASH_5761VENDOR_ST_A_M45PE80:
10816                 case FLASH_5761VENDOR_ST_A_M45PE16:
10817                 case FLASH_5761VENDOR_ST_M_M45PE20:
10818                 case FLASH_5761VENDOR_ST_M_M45PE40:
10819                 case FLASH_5761VENDOR_ST_M_M45PE80:
10820                 case FLASH_5761VENDOR_ST_M_M45PE16:
10821                         tp->nvram_jedecnum = JEDEC_ST;
10822                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10823                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10824                         tp->nvram_pagesize = 256;
10825                         break;
10826         }
10827
10828         if (protect) {
10829                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10830         } else {
10831                 switch (nvcfg1) {
10832                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10833                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10834                         case FLASH_5761VENDOR_ST_A_M45PE16:
10835                         case FLASH_5761VENDOR_ST_M_M45PE16:
10836                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10837                                 break;
10838                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10839                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10840                         case FLASH_5761VENDOR_ST_A_M45PE80:
10841                         case FLASH_5761VENDOR_ST_M_M45PE80:
10842                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10843                                 break;
10844                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10845                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10846                         case FLASH_5761VENDOR_ST_A_M45PE40:
10847                         case FLASH_5761VENDOR_ST_M_M45PE40:
10848                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10849                                 break;
10850                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10851                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10852                         case FLASH_5761VENDOR_ST_A_M45PE20:
10853                         case FLASH_5761VENDOR_ST_M_M45PE20:
10854                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10855                                 break;
10856                 }
10857         }
10858 }
10859
10860 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10861 {
10862         tp->nvram_jedecnum = JEDEC_ATMEL;
10863         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10864         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10865 }
10866
10867 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10868 static void __devinit tg3_nvram_init(struct tg3 *tp)
10869 {
10870         tw32_f(GRC_EEPROM_ADDR,
10871              (EEPROM_ADDR_FSM_RESET |
10872               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10873                EEPROM_ADDR_CLKPERD_SHIFT)));
10874
10875         msleep(1);
10876
10877         /* Enable seeprom accesses. */
10878         tw32_f(GRC_LOCAL_CTRL,
10879              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10880         udelay(100);
10881
10882         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10883             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10884                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10885
10886                 if (tg3_nvram_lock(tp)) {
10887                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10888                                "tg3_nvram_init failed.\n", tp->dev->name);
10889                         return;
10890                 }
10891                 tg3_enable_nvram_access(tp);
10892
10893                 tp->nvram_size = 0;
10894
10895                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10896                         tg3_get_5752_nvram_info(tp);
10897                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10898                         tg3_get_5755_nvram_info(tp);
10899                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10900                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10901                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10902                         tg3_get_5787_nvram_info(tp);
10903                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10904                         tg3_get_5761_nvram_info(tp);
10905                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10906                         tg3_get_5906_nvram_info(tp);
10907                 else
10908                         tg3_get_nvram_info(tp);
10909
10910                 if (tp->nvram_size == 0)
10911                         tg3_get_nvram_size(tp);
10912
10913                 tg3_disable_nvram_access(tp);
10914                 tg3_nvram_unlock(tp);
10915
10916         } else {
10917                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10918
10919                 tg3_get_eeprom_size(tp);
10920         }
10921 }
10922
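/* Read one 32-bit word through the legacy serial-EEPROM interface used
 * when TG3_FLAG_NVRAM is not set: program the address together with the
 * READ and START bits, poll EEPROM_ADDR_COMPLETE for up to ~1 second,
 * then fetch the result from GRC_EEPROM_DATA.
 */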
10923 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10924                                         u32 offset, u32 *val)
10925 {
10926         u32 tmp;
10927         int i;
10928
10929         if (offset > EEPROM_ADDR_ADDR_MASK ||
10930             (offset % 4) != 0)
10931                 return -EINVAL;
10932
10933         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10934                                         EEPROM_ADDR_DEVID_MASK |
10935                                         EEPROM_ADDR_READ);
10936         tw32(GRC_EEPROM_ADDR,
10937              tmp |
10938              (0 << EEPROM_ADDR_DEVID_SHIFT) |
10939              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10940               EEPROM_ADDR_ADDR_MASK) |
10941              EEPROM_ADDR_READ | EEPROM_ADDR_START);
10942
10943         for (i = 0; i < 1000; i++) {
10944                 tmp = tr32(GRC_EEPROM_ADDR);
10945
10946                 if (tmp & EEPROM_ADDR_COMPLETE)
10947                         break;
10948                 msleep(1);
10949         }
10950         if (!(tmp & EEPROM_ADDR_COMPLETE))
10951                 return -EBUSY;
10952
10953         *val = tr32(GRC_EEPROM_DATA);
10954         return 0;
10955 }
10956
10957 #define NVRAM_CMD_TIMEOUT 10000
10958
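/* Issue an NVRAM controller command and wait for it to complete.  The
 * DONE bit is polled every 10 usecs for up to NVRAM_CMD_TIMEOUT
 * iterations (about 100 ms); -EBUSY is returned if it never asserts.
 */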
10959 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10960 {
10961         int i;
10962
10963         tw32(NVRAM_CMD, nvram_cmd);
10964         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10965                 udelay(10);
10966                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10967                         udelay(10);
10968                         break;
10969                 }
10970         }
10971         if (i == NVRAM_CMD_TIMEOUT) {
10972                 return -EBUSY;
10973         }
10974         return 0;
10975 }
10976
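/* Translate a linear NVRAM offset into the physical address expected by
 * buffered Atmel flash parts (AT45DB0X1B style), which address their
 * non-power-of-two pages by page index in the upper bits:
 * (offset / pagesize) << ATMEL_AT45DB0X1B_PAGE_POS plus the offset
 * within the page.  tg3_nvram_logical_addr() below is the inverse.
 * Devices flagged TG3_FLG3_NO_NVRAM_ADDR_TRANS use a flat address
 * space and are returned unchanged.
 */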
10977 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10978 {
10979         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10980             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10981             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10982            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10983             (tp->nvram_jedecnum == JEDEC_ATMEL))
10984
10985                 addr = ((addr / tp->nvram_pagesize) <<
10986                         ATMEL_AT45DB0X1B_PAGE_POS) +
10987                        (addr % tp->nvram_pagesize);
10988
10989         return addr;
10990 }
10991
10992 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10993 {
10994         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10995             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10996             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10997            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10998             (tp->nvram_jedecnum == JEDEC_ATMEL))
10999
11000                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
11001                         tp->nvram_pagesize) +
11002                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
11003
11004         return addr;
11005 }
11006
11007 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
11008 {
11009         int ret;
11010
11011         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
11012                 return tg3_nvram_read_using_eeprom(tp, offset, val);
11013
11014         offset = tg3_nvram_phys_addr(tp, offset);
11015
11016         if (offset > NVRAM_ADDR_MSK)
11017                 return -EINVAL;
11018
11019         ret = tg3_nvram_lock(tp);
11020         if (ret)
11021                 return ret;
11022
11023         tg3_enable_nvram_access(tp);
11024
11025         tw32(NVRAM_ADDR, offset);
11026         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
11027                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
11028
11029         if (ret == 0)
11030                 *val = swab32(tr32(NVRAM_RDDATA));
11031
11032         tg3_disable_nvram_access(tp);
11033
11034         tg3_nvram_unlock(tp);
11035
11036         return ret;
11037 }
11038
11039 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11040 {
11041         u32 v;
11042         int res = tg3_nvram_read(tp, offset, &v);
11043         if (!res)
11044                 *val = cpu_to_le32(v);
11045         return res;
11046 }
11047
11048 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11049 {
11050         int err;
11051         u32 tmp;
11052
11053         err = tg3_nvram_read(tp, offset, &tmp);
11054         *val = swab32(tmp);
11055         return err;
11056 }
11057
11058 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11059                                     u32 offset, u32 len, u8 *buf)
11060 {
11061         int i, j, rc = 0;
11062         u32 val;
11063
11064         for (i = 0; i < len; i += 4) {
11065                 u32 addr;
11066                 __le32 data;
11067
11068                 addr = offset + i;
11069
11070                 memcpy(&data, buf + i, 4);
11071
11072                 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
11073
11074                 val = tr32(GRC_EEPROM_ADDR);
11075                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11076
11077                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11078                         EEPROM_ADDR_READ);
11079                 tw32(GRC_EEPROM_ADDR, val |
11080                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
11081                         (addr & EEPROM_ADDR_ADDR_MASK) |
11082                         EEPROM_ADDR_START |
11083                         EEPROM_ADDR_WRITE);
11084
11085                 for (j = 0; j < 1000; j++) {
11086                         val = tr32(GRC_EEPROM_ADDR);
11087
11088                         if (val & EEPROM_ADDR_COMPLETE)
11089                                 break;
11090                         msleep(1);
11091                 }
11092                 if (!(val & EEPROM_ADDR_COMPLETE)) {
11093                         rc = -EBUSY;
11094                         break;
11095                 }
11096         }
11097
11098         return rc;
11099 }
11100
11101 /* offset and length are dword aligned */
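/* Unbuffered flash parts must be rewritten a full page at a time: read
 * the page into a scratch buffer, merge in the new data, issue a write
 * enable, erase the page, write enable again, then program it four
 * bytes per command (NVRAM_CMD_FIRST on the first word, NVRAM_CMD_LAST
 * on the final one).  A write-disable command is sent once all pages
 * have been processed.
 */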
11102 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11103                 u8 *buf)
11104 {
11105         int ret = 0;
11106         u32 pagesize = tp->nvram_pagesize;
11107         u32 pagemask = pagesize - 1;
11108         u32 nvram_cmd;
11109         u8 *tmp;
11110
11111         tmp = kmalloc(pagesize, GFP_KERNEL);
11112         if (tmp == NULL)
11113                 return -ENOMEM;
11114
11115         while (len) {
11116                 int j;
11117                 u32 phy_addr, page_off, size;
11118
11119                 phy_addr = offset & ~pagemask;
11120
11121                 for (j = 0; j < pagesize; j += 4) {
11122                         if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
11123                                                 (__le32 *) (tmp + j))))
11124                                 break;
11125                 }
11126                 if (ret)
11127                         break;
11128
11129                 page_off = offset & pagemask;
11130                 size = pagesize;
11131                 if (len < size)
11132                         size = len;
11133
11134                 len -= size;
11135
11136                 memcpy(tmp + page_off, buf, size);
11137
11138                 offset = offset + (pagesize - page_off);
11139
11140                 tg3_enable_nvram_access(tp);
11141
11142                 /*
11143                  * Before we can erase the flash page, we need
11144                  * to issue a special "write enable" command.
11145                  */
11146                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11147
11148                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11149                         break;
11150
11151                 /* Erase the target page */
11152                 tw32(NVRAM_ADDR, phy_addr);
11153
11154                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11155                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11156
11157                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11158                         break;
11159
11160                 /* Issue another write enable to start the write. */
11161                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11162
11163                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11164                         break;
11165
11166                 for (j = 0; j < pagesize; j += 4) {
11167                         __be32 data;
11168
11169                         data = *((__be32 *) (tmp + j));
11170                         /* swab32(le32_to_cpu(data)), actually */
11171                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
11172
11173                         tw32(NVRAM_ADDR, phy_addr + j);
11174
11175                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11176                                 NVRAM_CMD_WR;
11177
11178                         if (j == 0)
11179                                 nvram_cmd |= NVRAM_CMD_FIRST;
11180                         else if (j == (pagesize - 4))
11181                                 nvram_cmd |= NVRAM_CMD_LAST;
11182
11183                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11184                                 break;
11185                 }
11186                 if (ret)
11187                         break;
11188         }
11189
11190         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11191         tg3_nvram_exec_cmd(tp, nvram_cmd);
11192
11193         kfree(tmp);
11194
11195         return ret;
11196 }
11197
11198 /* offset and length are dword aligned */
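/* Buffered flash and EEPROM parts are programmed one word at a time.
 * NVRAM_CMD_FIRST is set at the start of the transfer or of a page and
 * NVRAM_CMD_LAST at the end of either; plain EEPROMs get FIRST|LAST on
 * every word.  ST flash on chips other than the 5752/5755/5761/5784/
 * 5785/5787 also needs an explicit write enable before each FIRST
 * command.
 */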
11199 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11200                 u8 *buf)
11201 {
11202         int i, ret = 0;
11203
11204         for (i = 0; i < len; i += 4, offset += 4) {
11205                 u32 page_off, phy_addr, nvram_cmd;
11206                 __be32 data;
11207
11208                 memcpy(&data, buf + i, 4);
11209                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11210
11211                 page_off = offset % tp->nvram_pagesize;
11212
11213                 phy_addr = tg3_nvram_phys_addr(tp, offset);
11214
11215                 tw32(NVRAM_ADDR, phy_addr);
11216
11217                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11218
11219                 if ((page_off == 0) || (i == 0))
11220                         nvram_cmd |= NVRAM_CMD_FIRST;
11221                 if (page_off == (tp->nvram_pagesize - 4))
11222                         nvram_cmd |= NVRAM_CMD_LAST;
11223
11224                 if (i == (len - 4))
11225                         nvram_cmd |= NVRAM_CMD_LAST;
11226
11227                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
11228                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
11229                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
11230                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
11231                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11232                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
11233                     (tp->nvram_jedecnum == JEDEC_ST) &&
11234                     (nvram_cmd & NVRAM_CMD_FIRST)) {
11235
11236                         if ((ret = tg3_nvram_exec_cmd(tp,
11237                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11238                                 NVRAM_CMD_DONE)))
11239
11240                                 break;
11241                 }
11242                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11243                         /* We always do complete word writes to eeprom. */
11244                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11245                 }
11246
11247                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11248                         break;
11249         }
11250         return ret;
11251 }
11252
11253 /* offset and length are dword aligned */
11254 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11255 {
11256         int ret;
11257
11258         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11259                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11260                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11261                 udelay(40);
11262         }
11263
11264         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11265                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11266         }
11267         else {
11268                 u32 grc_mode;
11269
11270                 ret = tg3_nvram_lock(tp);
11271                 if (ret)
11272                         return ret;
11273
11274                 tg3_enable_nvram_access(tp);
11275                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11276                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11277                         tw32(NVRAM_WRITE1, 0x406);
11278
11279                 grc_mode = tr32(GRC_MODE);
11280                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11281
11282                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11283                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11284
11285                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11286                                 buf);
11287                 }
11288                 else {
11289                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11290                                 buf);
11291                 }
11292
11293                 grc_mode = tr32(GRC_MODE);
11294                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11295
11296                 tg3_disable_nvram_access(tp);
11297                 tg3_nvram_unlock(tp);
11298         }
11299
11300         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11301                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11302                 udelay(40);
11303         }
11304
11305         return ret;
11306 }
11307
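/* Fallback mapping from PCI subsystem vendor/device IDs to PHY IDs,
 * consulted by tg3_phy_probe() when neither the MII registers nor the
 * EEPROM configuration yield a usable PHY ID.
 */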
11308 struct subsys_tbl_ent {
11309         u16 subsys_vendor, subsys_devid;
11310         u32 phy_id;
11311 };
11312
11313 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11314         /* Broadcom boards. */
11315         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11316         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11317         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11318         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
11319         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11320         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11321         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
11322         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11323         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11324         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11325         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11326
11327         /* 3com boards. */
11328         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11329         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11330         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
11331         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11332         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11333
11334         /* DELL boards. */
11335         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11336         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11337         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11338         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11339
11340         /* Compaq boards. */
11341         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11342         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11343         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
11344         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11345         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11346
11347         /* IBM boards. */
11348         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11349 };
11350
11351 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11352 {
11353         int i;
11354
11355         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11356                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11357                      tp->pdev->subsystem_vendor) &&
11358                     (subsys_id_to_phy_id[i].subsys_devid ==
11359                      tp->pdev->subsystem_device))
11360                         return &subsys_id_to_phy_id[i];
11361         }
11362         return NULL;
11363 }
11364
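/* Pull the hardware configuration block out of NIC SRAM (the
 * NIC_SRAM_DATA_* words, guarded by the NIC_SRAM_DATA_SIG magic) and
 * translate it into driver state: PHY ID, SERDES vs. copper, LED mode,
 * ASF/APE and WOL flags.  The device is forced into D0 and the memory
 * arbiter is enabled first so that the SRAM reads are valid; the 5906
 * takes a shortcut through the VCPU shadow register instead.
 */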
11365 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11366 {
11367         u32 val;
11368         u16 pmcsr;
11369
11370         /* On some early chips the SRAM cannot be accessed in D3hot state,
11371          * so we need to make sure we're in D0.
11372          */
11373         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11374         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11375         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11376         msleep(1);
11377
11378         /* Make sure register accesses (indirect or otherwise)
11379          * will function correctly.
11380          */
11381         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11382                                tp->misc_host_ctrl);
11383
11384         /* The memory arbiter has to be enabled in order for SRAM accesses
11385          * to succeed.  Normally on powerup the tg3 chip firmware will make
11386          * sure it is enabled, but other entities such as system netboot
11387          * code might disable it.
11388          */
11389         val = tr32(MEMARB_MODE);
11390         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11391
11392         tp->phy_id = PHY_ID_INVALID;
11393         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11394
11395         /* Assume an onboard device and WOL capable by default.  */
11396         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11397
11398         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11399                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11400                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11401                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11402                 }
11403                 val = tr32(VCPU_CFGSHDW);
11404                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11405                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11406                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11407                     (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11408                     device_may_wakeup(&tp->pdev->dev))
11409                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11410                 goto done;
11411         }
11412
11413         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11414         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11415                 u32 nic_cfg, led_cfg;
11416                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11417                 int eeprom_phy_serdes = 0;
11418
11419                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11420                 tp->nic_sram_data_cfg = nic_cfg;
11421
11422                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11423                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11424                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11425                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11426                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11427                     (ver > 0) && (ver < 0x100))
11428                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11429
11430                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11431                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11432
11433                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11434                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11435                         eeprom_phy_serdes = 1;
11436
11437                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11438                 if (nic_phy_id != 0) {
11439                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11440                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11441
11442                         eeprom_phy_id  = (id1 >> 16) << 10;
11443                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
11444                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
11445                 } else
11446                         eeprom_phy_id = 0;
11447
11448                 tp->phy_id = eeprom_phy_id;
11449                 if (eeprom_phy_serdes) {
11450                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11451                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11452                         else
11453                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11454                 }
11455
11456                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11457                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11458                                     SHASTA_EXT_LED_MODE_MASK);
11459                 else
11460                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11461
11462                 switch (led_cfg) {
11463                 default:
11464                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11465                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11466                         break;
11467
11468                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11469                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11470                         break;
11471
11472                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11473                         tp->led_ctrl = LED_CTRL_MODE_MAC;
11474
11475                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11476                          * read on some older 5700/5701 bootcode.
11477                          */
11478                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11479                             ASIC_REV_5700 ||
11480                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
11481                             ASIC_REV_5701)
11482                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11483
11484                         break;
11485
11486                 case SHASTA_EXT_LED_SHARED:
11487                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
11488                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11489                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11490                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11491                                                  LED_CTRL_MODE_PHY_2);
11492                         break;
11493
11494                 case SHASTA_EXT_LED_MAC:
11495                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11496                         break;
11497
11498                 case SHASTA_EXT_LED_COMBO:
11499                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
11500                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11501                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11502                                                  LED_CTRL_MODE_PHY_2);
11503                         break;
11504
11505                 }
11506
11507                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11508                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11509                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11510                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11511
11512                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11513                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11514
11515                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11516                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11517                         if ((tp->pdev->subsystem_vendor ==
11518                              PCI_VENDOR_ID_ARIMA) &&
11519                             (tp->pdev->subsystem_device == 0x205a ||
11520                              tp->pdev->subsystem_device == 0x2063))
11521                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11522                 } else {
11523                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11524                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11525                 }
11526
11527                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11528                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11529                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11530                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11531                 }
11532
11533                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11534                         (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11535                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11536
11537                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11538                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11539                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11540
11541                 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11542                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11543                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11544
11545                 if (cfg2 & (1 << 17))
11546                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11547
11548                 /* serdes signal pre-emphasis in register 0x590 set by
11549                  * bootcode if bit 18 is set */
11550                 if (cfg2 & (1 << 18))
11551                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11552
11553                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11554                         u32 cfg3;
11555
11556                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11557                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11558                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11559                 }
11560
11561                 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11562                         tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11563                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11564                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11565                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11566                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11567         }
11568 done:
11569         device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11570         device_set_wakeup_enable(&tp->pdev->dev,
11571                                  tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11572 }
11573
11574 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11575 {
11576         int i;
11577         u32 val;
11578
11579         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11580         tw32(OTP_CTRL, cmd);
11581
11582         /* Wait for up to 1 ms for command to execute. */
11583         for (i = 0; i < 100; i++) {
11584                 val = tr32(OTP_STATUS);
11585                 if (val & OTP_STATUS_CMD_DONE)
11586                         break;
11587                 udelay(10);
11588         }
11589
11590         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11591 }
11592
11593 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11594  * configuration is a 32-bit value that straddles the alignment boundary.
11595  * We do two 32-bit reads and then shift and merge the results.
11596  */
11597 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11598 {
11599         u32 bhalf_otp, thalf_otp;
11600
11601         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11602
11603         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11604                 return 0;
11605
11606         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11607
11608         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11609                 return 0;
11610
11611         thalf_otp = tr32(OTP_READ_DATA);
11612
11613         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11614
11615         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11616                 return 0;
11617
11618         bhalf_otp = tr32(OTP_READ_DATA);
11619
11620         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11621 }
11622
11623 static int __devinit tg3_phy_probe(struct tg3 *tp)
11624 {
11625         u32 hw_phy_id_1, hw_phy_id_2;
11626         u32 hw_phy_id, hw_phy_id_masked;
11627         int err;
11628
11629         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11630                 return tg3_phy_init(tp);
11631
11632         /* Reading the PHY ID register can conflict with ASF
11633          * firmware access to the PHY hardware.
11634          */
11635         err = 0;
11636         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11637             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11638                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11639         } else {
11640                 /* Now read the physical PHY_ID from the chip and verify
11641                  * that it is sane.  If it doesn't look good, we fall back
11642                  * to the PHY_ID found in the eeprom area and, failing
11643                  * that, to the hard-coded subsystem-ID table.
11644                  */
11645                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11646                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11647
11648                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
11649                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11650                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
11651
11652                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11653         }
11654
11655         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11656                 tp->phy_id = hw_phy_id;
11657                 if (hw_phy_id_masked == PHY_ID_BCM8002)
11658                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11659                 else
11660                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11661         } else {
11662                 if (tp->phy_id != PHY_ID_INVALID) {
11663                         /* Do nothing, phy ID already set up in
11664                          * tg3_get_eeprom_hw_cfg().
11665                          */
11666                 } else {
11667                         struct subsys_tbl_ent *p;
11668
11669                         /* No eeprom signature?  Try the hardcoded
11670                          * subsys device table.
11671                          */
11672                         p = lookup_by_subsys(tp);
11673                         if (!p)
11674                                 return -ENODEV;
11675
11676                         tp->phy_id = p->phy_id;
11677                         if (!tp->phy_id ||
11678                             tp->phy_id == PHY_ID_BCM8002)
11679                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11680                 }
11681         }
11682
11683         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11684             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11685             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11686                 u32 bmsr, adv_reg, tg3_ctrl, mask;
11687
11688                 tg3_readphy(tp, MII_BMSR, &bmsr);
11689                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11690                     (bmsr & BMSR_LSTATUS))
11691                         goto skip_phy_reset;
11692
11693                 err = tg3_phy_reset(tp);
11694                 if (err)
11695                         return err;
11696
11697                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11698                            ADVERTISE_100HALF | ADVERTISE_100FULL |
11699                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11700                 tg3_ctrl = 0;
11701                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11702                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11703                                     MII_TG3_CTRL_ADV_1000_FULL);
11704                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11705                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11706                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11707                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
11708                 }
11709
11710                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11711                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11712                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11713                 if (!tg3_copper_is_advertising_all(tp, mask)) {
11714                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11715
11716                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11717                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11718
11719                         tg3_writephy(tp, MII_BMCR,
11720                                      BMCR_ANENABLE | BMCR_ANRESTART);
11721                 }
11722                 tg3_phy_set_wirespeed(tp);
11723
11724                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11725                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11726                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11727         }
11728
11729 skip_phy_reset:
11730         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11731                 err = tg3_init_5401phy_dsp(tp);
11732                 if (err)
11733                         return err;
11734         }
11735
11736         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11737                 err = tg3_init_5401phy_dsp(tp);
11738         }
11739
11740         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11741                 tp->link_config.advertising =
11742                         (ADVERTISED_1000baseT_Half |
11743                          ADVERTISED_1000baseT_Full |
11744                          ADVERTISED_Autoneg |
11745                          ADVERTISED_FIBRE);
11746         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11747                 tp->link_config.advertising &=
11748                         ~(ADVERTISED_1000baseT_Half |
11749                           ADVERTISED_1000baseT_Full);
11750
11751         return err;
11752 }
11753
11754 static void __devinit tg3_read_partno(struct tg3 *tp)
11755 {
11756         unsigned char vpd_data[256];
11757         unsigned int i;
11758         u32 magic;
11759
11760         if (tg3_nvram_read_swab(tp, 0x0, &magic))
11761                 goto out_not_found;
11762
11763         if (magic == TG3_EEPROM_MAGIC) {
11764                 for (i = 0; i < 256; i += 4) {
11765                         u32 tmp;
11766
11767                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11768                                 goto out_not_found;
11769
11770                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11771                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11772                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11773                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11774                 }
11775         } else {
11776                 int vpd_cap;
11777
11778                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11779                 for (i = 0; i < 256; i += 4) {
11780                         u32 tmp, j = 0;
11781                         __le32 v;
11782                         u16 tmp16;
11783
11784                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11785                                               i);
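                              /* The device sets bit 15 of the VPD address
                               * register once the requested dword is ready,
                               * so poll for it with a bounded wait.
                               */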
11786                         while (j++ < 100) {
11787                                 pci_read_config_word(tp->pdev, vpd_cap +
11788                                                      PCI_VPD_ADDR, &tmp16);
11789                                 if (tmp16 & 0x8000)
11790                                         break;
11791                                 msleep(1);
11792                         }
11793                         if (!(tmp16 & 0x8000))
11794                                 goto out_not_found;
11795
11796                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11797                                               &tmp);
11798                         v = cpu_to_le32(tmp);
11799                         memcpy(&vpd_data[i], &v, 4);
11800                 }
11801         }
11802
11803         /* Now parse and find the part number. */
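              /* The buffer follows the PCI VPD resource format: 0x82 is the
               * identifier-string tag, 0x91 the read/write VPD tag, and 0x90
               * the read-only VPD tag that carries the "PN" (part number)
               * keyword we are looking for.
               */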
11804         for (i = 0; i < 254; ) {
11805                 unsigned char val = vpd_data[i];
11806                 unsigned int block_end;
11807
11808                 if (val == 0x82 || val == 0x91) {
11809                         i = (i + 3 +
11810                              (vpd_data[i + 1] +
11811                               (vpd_data[i + 2] << 8)));
11812                         continue;
11813                 }
11814
11815                 if (val != 0x90)
11816                         goto out_not_found;
11817
11818                 block_end = (i + 3 +
11819                              (vpd_data[i + 1] +
11820                               (vpd_data[i + 2] << 8)));
11821                 i += 3;
11822
11823                 if (block_end > 256)
11824                         goto out_not_found;
11825
11826                 while (i < (block_end - 2)) {
11827                         if (vpd_data[i + 0] == 'P' &&
11828                             vpd_data[i + 1] == 'N') {
11829                                 int partno_len = vpd_data[i + 2];
11830
11831                                 i += 3;
11832                                 if (partno_len > 24 || (partno_len + i) > 256)
11833                                         goto out_not_found;
11834
11835                                 memcpy(tp->board_part_number,
11836                                        &vpd_data[i], partno_len);
11837
11838                                 /* Success. */
11839                                 return;
11840                         }
11841                         i += 3 + vpd_data[i + 2];
11842                 }
11843
11844                 /* Part number not found. */
11845                 goto out_not_found;
11846         }
11847
11848 out_not_found:
11849         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11850                 strcpy(tp->board_part_number, "BCM95906");
11851         else
11852                 strcpy(tp->board_part_number, "none");
11853 }
11854
11855 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11856 {
11857         u32 val;
11858
11859         if (tg3_nvram_read_swab(tp, offset, &val) ||
11860             (val & 0xfc000000) != 0x0c000000 ||
11861             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11862             val != 0)
11863                 return 0;
11864
11865         return 1;
11866 }
11867
11868 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11869 {
11870         u32 val, offset, start;
11871         u32 ver_offset;
11872         int i, bcnt;
11873
11874         if (tg3_nvram_read_swab(tp, 0, &val))
11875                 return;
11876
11877         if (val != TG3_EEPROM_MAGIC)
11878                 return;
11879
11880         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11881             tg3_nvram_read_swab(tp, 0x4, &start))
11882                 return;
11883
11884         offset = tg3_nvram_logical_addr(tp, offset);
11885
11886         if (!tg3_fw_img_is_valid(tp, offset) ||
11887             tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11888                 return;
11889
11890         offset = offset + ver_offset - start;
11891         for (i = 0; i < 16; i += 4) {
11892                 __le32 v;
11893                 if (tg3_nvram_read_le(tp, offset + i, &v))
11894                         return;
11895
11896                 memcpy(tp->fw_ver + i, &v, 4);
11897         }
11898
11899         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11900              (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11901                 return;
11902
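              /* ASF management firmware is enabled (and not APE-managed), so
               * locate the ASF init entry in the NVRAM directory and append
               * its version to the fw_ver string.
               */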
11903         for (offset = TG3_NVM_DIR_START;
11904              offset < TG3_NVM_DIR_END;
11905              offset += TG3_NVM_DIRENT_SIZE) {
11906                 if (tg3_nvram_read_swab(tp, offset, &val))
11907                         return;
11908
11909                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11910                         break;
11911         }
11912
11913         if (offset == TG3_NVM_DIR_END)
11914                 return;
11915
11916         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11917                 start = 0x08000000;
11918         else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11919                 return;
11920
11921         if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11922             !tg3_fw_img_is_valid(tp, offset) ||
11923             tg3_nvram_read_swab(tp, offset + 8, &val))
11924                 return;
11925
11926         offset += val - start;
11927
11928         bcnt = strlen(tp->fw_ver);
11929
11930         tp->fw_ver[bcnt++] = ',';
11931         tp->fw_ver[bcnt++] = ' ';
11932
11933         for (i = 0; i < 4; i++) {
11934                 __le32 v;
11935                 if (tg3_nvram_read_le(tp, offset, &v))
11936                         return;
11937
11938                 offset += sizeof(v);
11939
11940                 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11941                         memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11942                         break;
11943                 }
11944
11945                 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11946                 bcnt += sizeof(v);
11947         }
11948
11949         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11950 }
11951
11952 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11953
11954 static int __devinit tg3_get_invariants(struct tg3 *tp)
11955 {
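              /* Host bridges known to reorder writes to the mailbox
               * registers; see the TG3_FLAG_MBOX_WRITE_REORDER handling
               * further down.
               */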
11956         static struct pci_device_id write_reorder_chipsets[] = {
11957                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11958                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11959                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11960                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11961                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11962                              PCI_DEVICE_ID_VIA_8385_0) },
11963                 { },
11964         };
11965         u32 misc_ctrl_reg;
11966         u32 cacheline_sz_reg;
11967         u32 pci_state_reg, grc_misc_cfg;
11968         u32 val;
11969         u16 pci_cmd;
11970         int err, pcie_cap;
11971
11972         /* Force memory write invalidate off.  If we leave it on,
11973          * then on 5700_BX chips we have to enable a workaround.
11974          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11975          * to match the cacheline size.  The Broadcom driver has this
11976          * workaround but turns MWI off all the time, so it is never
11977          * used.  This seems to suggest that the workaround is insufficient.
11978          */
11979         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11980         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11981         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11982
11983         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11984          * has the register indirect write enable bit set before
11985          * we try to access any of the MMIO registers.  It is also
11986          * critical that the PCI-X hw workaround situation is decided
11987          * before that as well.
11988          */
11989         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11990                               &misc_ctrl_reg);
11991
11992         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11993                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11994         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11995                 u32 prod_id_asic_rev;
11996
11997                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11998                                       &prod_id_asic_rev);
11999                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
12000         }
12001
12002         /* Wrong chip ID in 5752 A0. This code can be removed later
12003          * as A0 is not in production.
12004          */
12005         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12006                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12007
12008         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12009          * we need to disable memory and use configuration cycles
12010          * only to access all registers. The 5702/03 chips
12011          * can mistakenly decode the special cycles from the
12012          * ICH chipsets as memory write cycles, causing corruption
12013          * of register and memory space. Only certain ICH bridges
12014          * will drive special cycles with non-zero data during the
12015          * address phase which can fall within the 5703's address
12016          * range. This is not an ICH bug as the PCI spec allows
12017          * non-zero address during special cycles. However, only
12018          * these ICH bridges are known to drive non-zero addresses
12019          * during special cycles.
12020          *
12021          * Since special cycles do not cross PCI bridges, we only
12022          * enable this workaround if the 5703 is on the secondary
12023          * bus of these ICH bridges.
12024          */
12025         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12026             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12027                 static struct tg3_dev_id {
12028                         u32     vendor;
12029                         u32     device;
12030                         u32     rev;
12031                 } ich_chipsets[] = {
12032                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12033                           PCI_ANY_ID },
12034                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12035                           PCI_ANY_ID },
12036                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12037                           0xa },
12038                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12039                           PCI_ANY_ID },
12040                         { },
12041                 };
12042                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12043                 struct pci_dev *bridge = NULL;
12044
12045                 while (pci_id->vendor != 0) {
12046                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
12047                                                 bridge);
12048                         if (!bridge) {
12049                                 pci_id++;
12050                                 continue;
12051                         }
12052                         if (pci_id->rev != PCI_ANY_ID) {
12053                                 if (bridge->revision > pci_id->rev)
12054                                         continue;
12055                         }
12056                         if (bridge->subordinate &&
12057                             (bridge->subordinate->number ==
12058                              tp->pdev->bus->number)) {
12059
12060                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12061                                 pci_dev_put(bridge);
12062                                 break;
12063                         }
12064                 }
12065         }
12066
12067         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12068                 static struct tg3_dev_id {
12069                         u32     vendor;
12070                         u32     device;
12071                 } bridge_chipsets[] = {
12072                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12073                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12074                         { },
12075                 };
12076                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12077                 struct pci_dev *bridge = NULL;
12078
12079                 while (pci_id->vendor != 0) {
12080                         bridge = pci_get_device(pci_id->vendor,
12081                                                 pci_id->device,
12082                                                 bridge);
12083                         if (!bridge) {
12084                                 pci_id++;
12085                                 continue;
12086                         }
12087                         if (bridge->subordinate &&
12088                             (bridge->subordinate->number <=
12089                              tp->pdev->bus->number) &&
12090                             (bridge->subordinate->subordinate >=
12091                              tp->pdev->bus->number)) {
12092                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12093                                 pci_dev_put(bridge);
12094                                 break;
12095                         }
12096                 }
12097         }
12098
12099         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12100          * DMA addresses wider than 40 bits.  This bridge may have other
12101          * 57xx devices behind it, in some 4-port NIC designs for example.
12102          * Any tg3 device found behind the bridge will also need the 40-bit
12103          * DMA workaround.
12104          */
12105         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12106             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12107                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12108                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12109                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12110         }
12111         else {
12112                 struct pci_dev *bridge = NULL;
12113
12114                 do {
12115                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12116                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12117                                                 bridge);
12118                         if (bridge && bridge->subordinate &&
12119                             (bridge->subordinate->number <=
12120                              tp->pdev->bus->number) &&
12121                             (bridge->subordinate->subordinate >=
12122                              tp->pdev->bus->number)) {
12123                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12124                                 pci_dev_put(bridge);
12125                                 break;
12126                         }
12127                 } while (bridge);
12128         }
12129
12130         /* Initialize misc host control in PCI block. */
12131         tp->misc_host_ctrl |= (misc_ctrl_reg &
12132                                MISC_HOST_CTRL_CHIPREV);
12133         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12134                                tp->misc_host_ctrl);
12135
12136         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12137                               &cacheline_sz_reg);
12138
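              /* TG3PCI_CACHELINESZ mirrors PCI config offset 0x0c: cache
               * line size, latency timer, header type and BIST, one byte
               * each.
               */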
12139         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12140         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12141         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12142         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12143
12144         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12145             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12146                 tp->pdev_peer = tg3_find_peer(tp);
12147
12148         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12149             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12150             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12151             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12152             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12153             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12154             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12155             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12156             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12157                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12158
12159         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12160             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12161                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12162
12163         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12164                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12165                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12166                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12167                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12168                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12169                      tp->pdev_peer == tp->pdev))
12170                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12171
12172                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12173                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12174                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12175                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12176                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12177                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12178                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12179                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12180                 } else {
12181                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12182                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12183                                 ASIC_REV_5750 &&
12184                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12185                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12186                 }
12187         }
12188
12189         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12190              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12191                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12192
12193         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12194                               &pci_state_reg);
12195
12196         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12197         if (pcie_cap != 0) {
12198                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12199
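                      /* Bump the PCIe maximum read request size to 4096
                       * bytes.
                       */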
12200                 pcie_set_readrq(tp->pdev, 4096);
12201
12202                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12203                         u16 lnkctl;
12204
12205                         pci_read_config_word(tp->pdev,
12206                                              pcie_cap + PCI_EXP_LNKCTL,
12207                                              &lnkctl);
12208                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12209                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12210                 }
12211         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12212                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12213         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12214                    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12215                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12216                 if (!tp->pcix_cap) {
12217                         printk(KERN_ERR PFX "Cannot find PCI-X "
12218                                             "capability, aborting.\n");
12219                         return -EIO;
12220                 }
12221
12222                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12223                         tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12224         }
12225
12226         /* If we have an AMD 762 or VIA K8T800 chipset, the host
12227          * controller may reorder writes to the mailbox registers,
12228          * which can cause major trouble.  We read back from
12229          * every mailbox register write to force the writes to be
12230          * posted to the chip in order.
12231          */
12232         if (pci_dev_present(write_reorder_chipsets) &&
12233             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12234                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12235
12236         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12237             tp->pci_lat_timer < 64) {
12238                 tp->pci_lat_timer = 64;
12239
12240                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12241                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12242                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12243                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12244
12245                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12246                                        cacheline_sz_reg);
12247         }
12248
12249         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12250                 /* 5700 BX chips need to have their TX producer index
12251                  * mailboxes written twice to work around a bug.
12252                  */
12253                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12254
12255                 /* If we are in PCI-X mode, enable register write workaround.
12256                  *
12257                  * The workaround is to use indirect register accesses
12258                  * for all chip writes not to mailbox registers.
12259                  */
12260                 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12261                         u32 pm_reg;
12262
12263                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12264
12265                         /* The chip can have its power management PCI config
12266                          * space registers clobbered due to this bug.
12267                          * So explicitly force the chip into D0 here.
12268                          */
12269                         pci_read_config_dword(tp->pdev,
12270                                               tp->pm_cap + PCI_PM_CTRL,
12271                                               &pm_reg);
12272                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12273                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12274                         pci_write_config_dword(tp->pdev,
12275                                                tp->pm_cap + PCI_PM_CTRL,
12276                                                pm_reg);
12277
12278                         /* Also, force SERR#/PERR# in PCI command. */
12279                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12280                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12281                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12282                 }
12283         }
12284
12285         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12286                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12287         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12288                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12289
12290         /* Chip-specific fixup from Broadcom driver */
12291         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12292             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12293                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12294                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12295         }
12296
12297         /* Default fast path register access methods */
12298         tp->read32 = tg3_read32;
12299         tp->write32 = tg3_write32;
12300         tp->read32_mbox = tg3_read32;
12301         tp->write32_mbox = tg3_write32;
12302         tp->write32_tx_mbox = tg3_write32;
12303         tp->write32_rx_mbox = tg3_write32;
12304
12305         /* Various workaround register access methods */
12306         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12307                 tp->write32 = tg3_write_indirect_reg32;
12308         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12309                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12310                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12311                 /*
12312                  * Back-to-back register writes can cause problems on these
12313                  * chips; the workaround is to read back all reg writes
12314                  * except those to mailbox regs.
12315                  *
12316                  * See tg3_write_indirect_reg32().
12317                  */
12318                 tp->write32 = tg3_write_flush_reg32;
12319         }
12320
12321
12322         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12323             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12324                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12325                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12326                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12327         }
12328
12329         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12330                 tp->read32 = tg3_read_indirect_reg32;
12331                 tp->write32 = tg3_write_indirect_reg32;
12332                 tp->read32_mbox = tg3_read_indirect_mbox;
12333                 tp->write32_mbox = tg3_write_indirect_mbox;
12334                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12335                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12336
12337                 iounmap(tp->regs);
12338                 tp->regs = NULL;
12339
12340                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12341                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12342                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12343         }
12344         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12345                 tp->read32_mbox = tg3_read32_mbox_5906;
12346                 tp->write32_mbox = tg3_write32_mbox_5906;
12347                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12348                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12349         }
12350
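              /* If registers must be written indirectly, or a 5700/5701 is
               * running in PCI-X mode, access NIC SRAM through the PCI
               * config window as well.
               */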
12351         if (tp->write32 == tg3_write_indirect_reg32 ||
12352             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12353              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12354               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12355                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12356
12357         /* Get eeprom hw config before calling tg3_set_power_state().
12358          * In particular, the TG3_FLG2_IS_NIC flag must be
12359          * determined before calling tg3_set_power_state() so that
12360          * we know whether or not to switch out of Vaux power.
12361          * When the flag is set, it means that GPIO1 is used for eeprom
12362          * write protect and also implies that it is a LOM where GPIOs
12363          * are not used to switch power.
12364          */
12365         tg3_get_eeprom_hw_cfg(tp);
12366
12367         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12368                 /* Allow reads and writes to the
12369                  * APE register and memory space.
12370                  */
12371                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12372                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12373                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12374                                        pci_state_reg);
12375         }
12376
12377         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12378             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12379             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12380                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12381
12382         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12383          * GPIO1 driven high will bring 5700's external PHY out of reset.
12384          * It is also used as eeprom write protect on LOMs.
12385          */
12386         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12387         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12388             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12389                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12390                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12391         /* Unused GPIO3 must be driven as output on 5752 because there
12392          * are no pull-up resistors on unused GPIO pins.
12393          */
12394         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12395                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12396
12397         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12398                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12399
12400         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12401                 /* Turn off the debug UART. */
12402                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12403                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12404                         /* Keep VMain power. */
12405                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12406                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12407         }
12408
12409         /* Force the chip into D0. */
12410         err = tg3_set_power_state(tp, PCI_D0);
12411         if (err) {
12412                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12413                        pci_name(tp->pdev));
12414                 return err;
12415         }
12416
12417         /* 5700 B0 chips do not support checksumming correctly due
12418          * to hardware bugs.
12419          */
12420         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12421                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12422
12423         /* Derive initial jumbo mode from MTU assigned in
12424          * ether_setup() via the alloc_etherdev() call
12425          */
12426         if (tp->dev->mtu > ETH_DATA_LEN &&
12427             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12428                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12429
12430         /* Determine WakeOnLan speed to use. */
12431         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12432             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12433             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12434             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12435                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12436         } else {
12437                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12438         }
12439
12440         /* A few boards don't want the Ethernet@WireSpeed phy feature */
12441         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12442             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12443              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12444              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12445             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12446             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12447                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12448
12449         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12450             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12451                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12452         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12453                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12454
12455         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12456                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12457                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12458                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12459                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12460                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12461                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12462                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12463                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12464                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12465                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12466                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12467                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12468         }
12469
12470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12471             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12472                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12473                 if (tp->phy_otp == 0)
12474                         tp->phy_otp = TG3_OTP_DEFAULT;
12475         }
12476
12477         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12478                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12479         else
12480                 tp->mi_mode = MAC_MI_MODE_BASE;
12481
12482         tp->coalesce_mode = 0;
12483         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12484             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12485                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12486
12487         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12488                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12489
12490         err = tg3_mdio_init(tp);
12491         if (err)
12492                 return err;
12493
12494         /* Initialize data/descriptor byte/word swapping. */
12495         val = tr32(GRC_MODE);
12496         val &= GRC_MODE_HOST_STACKUP;
12497         tw32(GRC_MODE, val | tp->grc_mode);
12498
12499         tg3_switch_clocks(tp);
12500
12501         /* Clear this out for sanity. */
12502         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12503
12504         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12505                               &pci_state_reg);
12506         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12507             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12508                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12509
12510                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12511                     chiprevid == CHIPREV_ID_5701_B0 ||
12512                     chiprevid == CHIPREV_ID_5701_B2 ||
12513                     chiprevid == CHIPREV_ID_5701_B5) {
12514                         void __iomem *sram_base;
12515
12516                         /* Write some dummy words into the SRAM status block
12517                          * area and see if they read back correctly.  If the value
12518                          * read back is bad, force-enable the PCI-X workaround.
12519                          */
12520                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12521
12522                         writel(0x00000000, sram_base);
12523                         writel(0x00000000, sram_base + 4);
12524                         writel(0xffffffff, sram_base + 4);
12525                         if (readl(sram_base) != 0x00000000)
12526                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12527                 }
12528         }
12529
12530         udelay(50);
12531         tg3_nvram_init(tp);
12532
12533         grc_misc_cfg = tr32(GRC_MISC_CFG);
12534         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12535
12536         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12537             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12538              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12539                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12540
12541         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12542             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12543                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12544         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12545                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12546                                       HOSTCC_MODE_CLRTICK_TXBD);
12547
12548                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12549                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12550                                        tp->misc_host_ctrl);
12551         }
12552
12553         /* Preserve the APE MAC_MODE bits */
12554         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12555                 tp->mac_mode = tr32(MAC_MODE) |
12556                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12557         else
12558                 tp->mac_mode = TG3_DEF_MAC_MODE;
12559
12560         /* these are limited to 10/100 only */
12561         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12562              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12563             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12564              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12565              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12566               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12567               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12568             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12569              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12570               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12571               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12572             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12573                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12574
12575         err = tg3_phy_probe(tp);
12576         if (err) {
12577                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12578                        pci_name(tp->pdev), err);
12579                 /* ... but do not return immediately ... */
12580                 tg3_mdio_fini(tp);
12581         }
12582
12583         tg3_read_partno(tp);
12584         tg3_read_fw_ver(tp);
12585
12586         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12587                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12588         } else {
12589                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12590                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12591                 else
12592                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12593         }
12594
12595         /* 5700 {AX,BX} chips have a broken status block link
12596          * change bit implementation, so we must use the
12597          * status register in those cases.
12598          */
12599         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12600                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12601         else
12602                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12603
12604         /* The led_ctrl is set during tg3_phy_probe; here we might
12605          * have to force the link status polling mechanism based
12606          * upon subsystem IDs.
12607          */
12608         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12609             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12610             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12611                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12612                                   TG3_FLAG_USE_LINKCHG_REG);
12613         }
12614
12615         /* For all SERDES we poll the MAC status register. */
12616         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12617                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12618         else
12619                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12620
12621         tp->rx_offset = NET_IP_ALIGN;
12622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12623             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12624                 tp->rx_offset = 0;
12625
12626         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12627
12628         /* Increment the rx prod index on the rx std ring by at most
12629          * 8 for these chips to work around hw errata.
12630          */
12631         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12632             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12633             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12634                 tp->rx_std_max_post = 8;
12635
12636         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12637                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12638                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12639
12640         return err;
12641 }
12642
12643 #ifdef CONFIG_SPARC
12644 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12645 {
12646         struct net_device *dev = tp->dev;
12647         struct pci_dev *pdev = tp->pdev;
12648         struct device_node *dp = pci_device_to_OF_node(pdev);
12649         const unsigned char *addr;
12650         int len;
12651
12652         addr = of_get_property(dp, "local-mac-address", &len);
12653         if (addr && len == 6) {
12654                 memcpy(dev->dev_addr, addr, 6);
12655                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12656                 return 0;
12657         }
12658         return -ENODEV;
12659 }
12660
12661 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12662 {
12663         struct net_device *dev = tp->dev;
12664
12665         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12666         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12667         return 0;
12668 }
12669 #endif
12670
12671 static int __devinit tg3_get_device_address(struct tg3 *tp)
12672 {
12673         struct net_device *dev = tp->dev;
12674         u32 hi, lo, mac_offset;
12675         int addr_ok = 0;
12676
12677 #ifdef CONFIG_SPARC
12678         if (!tg3_get_macaddr_sparc(tp))
12679                 return 0;
12680 #endif
12681
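              /* NVRAM offset of the MAC address: 0x7c by default, 0xcc for
               * the second port of 5704/5780-class devices, 0x10 on 5906.
               */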
12682         mac_offset = 0x7c;
12683         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12684             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12685                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12686                         mac_offset = 0xcc;
12687                 if (tg3_nvram_lock(tp))
12688                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12689                 else
12690                         tg3_nvram_unlock(tp);
12691         }
12692         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12693                 mac_offset = 0x10;
12694
12695         /* First try to get it from MAC address mailbox. */
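              /* Bootcode leaves a 0x484b ("HK") marker in the upper half of
               * the high word when a MAC address has been stored here.
               */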
12696         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12697         if ((hi >> 16) == 0x484b) {
12698                 dev->dev_addr[0] = (hi >>  8) & 0xff;
12699                 dev->dev_addr[1] = (hi >>  0) & 0xff;
12700
12701                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12702                 dev->dev_addr[2] = (lo >> 24) & 0xff;
12703                 dev->dev_addr[3] = (lo >> 16) & 0xff;
12704                 dev->dev_addr[4] = (lo >>  8) & 0xff;
12705                 dev->dev_addr[5] = (lo >>  0) & 0xff;
12706
12707                 /* Some old bootcode may report a 0 MAC address in SRAM */
12708                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12709         }
12710         if (!addr_ok) {
12711                 /* Next, try NVRAM. */
12712                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12713                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12714                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
12715                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
12716                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
12717                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
12718                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
12719                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
12720                 }
12721                 /* Finally just fetch it out of the MAC control regs. */
12722                 else {
12723                         hi = tr32(MAC_ADDR_0_HIGH);
12724                         lo = tr32(MAC_ADDR_0_LOW);
12725
12726                         dev->dev_addr[5] = lo & 0xff;
12727                         dev->dev_addr[4] = (lo >> 8) & 0xff;
12728                         dev->dev_addr[3] = (lo >> 16) & 0xff;
12729                         dev->dev_addr[2] = (lo >> 24) & 0xff;
12730                         dev->dev_addr[1] = hi & 0xff;
12731                         dev->dev_addr[0] = (hi >> 8) & 0xff;
12732                 }
12733         }
12734
12735         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12736 #ifdef CONFIG_SPARC
12737                 if (!tg3_get_default_macaddr_sparc(tp))
12738                         return 0;
12739 #endif
12740                 return -EINVAL;
12741         }
12742         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12743         return 0;
12744 }
12745
12746 #define BOUNDARY_SINGLE_CACHELINE       1
12747 #define BOUNDARY_MULTI_CACHELINE        2
12748
12749 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12750 {
12751         int cacheline_size;
12752         u8 byte;
12753         int goal;
12754
12755         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
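              /* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of
               * zero means it was never programmed, so assume the largest
               * (1024-byte) boundary.
               */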
12756         if (byte == 0)
12757                 cacheline_size = 1024;
12758         else
12759                 cacheline_size = (int) byte * 4;
12760
12761         /* On 5703 and later chips, the boundary bits have no
12762          * effect.
12763          */
12764         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12765             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12766             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12767                 goto out;
12768
12769 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12770         goal = BOUNDARY_MULTI_CACHELINE;
12771 #else
12772 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12773         goal = BOUNDARY_SINGLE_CACHELINE;
12774 #else
12775         goal = 0;
12776 #endif
12777 #endif
12778
12779         if (!goal)
12780                 goto out;
12781
12782         /* PCI controllers on most RISC systems tend to disconnect
12783          * when a device tries to burst across a cache-line boundary.
12784          * Therefore, letting tg3 do so just wastes PCI bandwidth.
12785          *
12786          * Unfortunately, for PCI-E there are only limited
12787          * write-side controls for this, and thus for reads
12788          * we will still get the disconnects.  We'll also waste
12789          * these PCI cycles for both read and write for chips
12790          * other than 5700 and 5701 which do not implement the
12791          * other than the 5700 and 5701, which do not implement the
12792          */
12793         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12794             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12795                 switch (cacheline_size) {
12796                 case 16:
12797                 case 32:
12798                 case 64:
12799                 case 128:
12800                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12801                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12802                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12803                         } else {
12804                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12805                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12806                         }
12807                         break;
12808
12809                 case 256:
12810                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12811                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12812                         break;
12813
12814                 default:
12815                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12816                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12817                         break;
12818                 }
12819         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12820                 switch (cacheline_size) {
12821                 case 16:
12822                 case 32:
12823                 case 64:
12824                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12825                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12826                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12827                                 break;
12828                         }
12829                         /* fallthrough */
12830                 case 128:
12831                 default:
12832                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12833                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12834                         break;
12835                 }
12836         } else {
12837                 switch (cacheline_size) {
12838                 case 16:
12839                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12840                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12841                                         DMA_RWCTRL_WRITE_BNDRY_16);
12842                                 break;
12843                         }
12844                         /* fallthrough */
12845                 case 32:
12846                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12847                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12848                                         DMA_RWCTRL_WRITE_BNDRY_32);
12849                                 break;
12850                         }
12851                         /* fallthrough */
12852                 case 64:
12853                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12854                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12855                                         DMA_RWCTRL_WRITE_BNDRY_64);
12856                                 break;
12857                         }
12858                         /* fallthrough */
12859                 case 128:
12860                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12861                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12862                                         DMA_RWCTRL_WRITE_BNDRY_128);
12863                                 break;
12864                         }
12865                         /* fallthrough */
12866                 case 256:
12867                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
12868                                 DMA_RWCTRL_WRITE_BNDRY_256);
12869                         break;
12870                 case 512:
12871                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
12872                                 DMA_RWCTRL_WRITE_BNDRY_512);
12873                         break;
12874                 case 1024:
12875                 default:
12876                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12877                                 DMA_RWCTRL_WRITE_BNDRY_1024);
12878                         break;
12879                 }
12880         }
12881
12882 out:
12883         return val;
12884 }
12885
12886 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12887 {
12888         struct tg3_internal_buffer_desc test_desc;
12889         u32 sram_dma_descs;
12890         int i, ret;
12891
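              /* Build a single internal buffer descriptor in NIC SRAM via
               * the PCI memory window, kick the read or write DMA queue,
               * then poll the matching completion FIFO for the descriptor
               * address.
               */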
12892         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12893
12894         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12895         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12896         tw32(RDMAC_STATUS, 0);
12897         tw32(WDMAC_STATUS, 0);
12898
12899         tw32(BUFMGR_MODE, 0);
12900         tw32(FTQ_RESET, 0);
12901
12902         test_desc.addr_hi = ((u64) buf_dma) >> 32;
12903         test_desc.addr_lo = buf_dma & 0xffffffff;
12904         test_desc.nic_mbuf = 0x00002100;
12905         test_desc.len = size;
12906
12907         /*
12908          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12909          * the *second* time the tg3 driver was getting loaded after an
12910          * initial scan.
12911          *
12912          * Broadcom tells me:
12913          *   ...the DMA engine is connected to the GRC block and a DMA
12914          *   reset may affect the GRC block in some unpredictable way...
12915          *   The behavior of resets to individual blocks has not been tested.
12916          *
12917          * Broadcom noted the GRC reset will also reset all sub-components.
12918          */
12919         if (to_device) {
12920                 test_desc.cqid_sqid = (13 << 8) | 2;
12921
12922                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12923                 udelay(40);
12924         } else {
12925                 test_desc.cqid_sqid = (16 << 8) | 7;
12926
12927                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12928                 udelay(40);
12929         }
12930         test_desc.flags = 0x00000005;
12931
12932         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12933                 u32 val;
12934
12935                 val = *(((u32 *)&test_desc) + i);
12936                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12937                                        sram_dma_descs + (i * sizeof(u32)));
12938                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12939         }
12940         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12941
12942         if (to_device) {
12943                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12944         } else {
12945                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12946         }
12947
12948         ret = -ENODEV;
12949         for (i = 0; i < 40; i++) {
12950                 u32 val;
12951
12952                 if (to_device)
12953                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12954                 else
12955                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12956                 if ((val & 0xffff) == sram_dma_descs) {
12957                         ret = 0;
12958                         break;
12959                 }
12960
12961                 udelay(100);
12962         }
12963
12964         return ret;
12965 }
12966
12967 #define TEST_BUFFER_SIZE        0x2000
12968
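/* Tune TG3PCI_DMA_RW_CTRL for the bus the chip sits on (PCI, PCI-X or
 * PCI Express) and, on 5700/5701 only, run a host <-> NIC DMA loopback
 * over a TEST_BUFFER_SIZE coherent buffer to detect the write DMA
 * boundary bug.  If corruption is seen, the write boundary is forced
 * down to 16 bytes and the test is retried.
 */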
12969 static int __devinit tg3_test_dma(struct tg3 *tp)
12970 {
12971         dma_addr_t buf_dma;
12972         u32 *buf, saved_dma_rwctrl;
12973         int ret;
12974
12975         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12976         if (!buf) {
12977                 ret = -ENOMEM;
12978                 goto out_nofree;
12979         }
12980
12981         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12982                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12983
12984         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12985
12986         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12987                 /* DMA read watermark not used on PCIE */
12988                 tp->dma_rwctrl |= 0x00180000;
12989         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12990                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12991                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12992                         tp->dma_rwctrl |= 0x003f0000;
12993                 else
12994                         tp->dma_rwctrl |= 0x003f000f;
12995         } else {
12996                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12997                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12998                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12999                         u32 read_water = 0x7;
13000
13001                         /* If the 5704 is behind the EPB bridge, we can
13002                          * do the less restrictive ONE_DMA workaround for
13003                          * better performance.
13004                          */
13005                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13006                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13007                                 tp->dma_rwctrl |= 0x8000;
13008                         else if (ccval == 0x6 || ccval == 0x7)
13009                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13010
13011                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13012                                 read_water = 4;
13013                         /* Set bit 23 to enable PCIX hw bug fix */
13014                         tp->dma_rwctrl |=
13015                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13016                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13017                                 (1 << 23);
13018                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13019                         /* 5780 always in PCIX mode */
13020                         tp->dma_rwctrl |= 0x00144000;
13021                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13022                         /* 5714 always in PCIX mode */
13023                         tp->dma_rwctrl |= 0x00148000;
13024                 } else {
13025                         tp->dma_rwctrl |= 0x001b000f;
13026                 }
13027         }
13028
13029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13030             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13031                 tp->dma_rwctrl &= 0xfffffff0;
13032
13033         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13034             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13035                 /* Remove this if it causes problems for some boards. */
13036                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13037
13038                 /* On 5700/5701 chips, we need to set this bit.
13039                  * Otherwise the chip will issue cacheline transactions
13040                  * to streamable DMA memory without all of the byte
13041                  * enables turned on.  This is an error on several
13042                  * RISC PCI controllers, in particular sparc64.
13043                  *
13044                  * On 5703/5704 chips, this bit has been reassigned
13045                  * a different meaning.  In particular, it is used
13046                  * on those chips to enable a PCI-X workaround.
13047                  */
13048                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13049         }
13050
13051         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13052
13053 #if 0
13054         /* Unneeded, already done by tg3_get_invariants.  */
13055         tg3_switch_clocks(tp);
13056 #endif
13057
13058         ret = 0;
13059         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13060             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13061                 goto out;
13062
13063         /* It is best to perform the DMA test with the maximum write burst size
13064          * to expose the 5700/5701 write DMA bug.
13065          */
13066         saved_dma_rwctrl = tp->dma_rwctrl;
13067         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13068         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13069
13070         while (1) {
13071                 u32 *p = buf, i;
13072
13073                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13074                         p[i] = i;
13075
13076                 /* Send the buffer to the chip. */
13077                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13078                 if (ret) {
13079                         printk(KERN_ERR "tg3_test_dma() Write to the buffer failed %d\n", ret);
13080                         break;
13081                 }
13082
13083 #if 0
13084                 /* validate data reached card RAM correctly. */
13085                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13086                         u32 val;
13087                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
13088                         if (le32_to_cpu(val) != p[i]) {
13089                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
13090                                 /* ret = -ENODEV here? */
13091                         }
13092                         p[i] = 0;
13093                 }
13094 #endif
13095                 /* Now read it back. */
13096                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13097                 if (ret) {
13098                         printk(KERN_ERR "tg3_test_dma() Read from the buffer failed %d\n", ret);
13099
13100                         break;
13101                 }
13102
13103                 /* Verify it. */
13104                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13105                         if (p[i] == i)
13106                                 continue;
13107
13108                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13109                             DMA_RWCTRL_WRITE_BNDRY_16) {
13110                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13111                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13112                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13113                                 break;
13114                         } else {
13115                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13116                                 ret = -ENODEV;
13117                                 goto out;
13118                         }
13119                 }
13120
13121                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13122                         /* Success. */
13123                         ret = 0;
13124                         break;
13125                 }
13126         }
13127         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13128             DMA_RWCTRL_WRITE_BNDRY_16) {
13129                 static struct pci_device_id dma_wait_state_chipsets[] = {
13130                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13131                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13132                         { },
13133                 };
13134
13135                 /* The DMA test passed without adjusting the DMA boundary;
13136                  * now look for chipsets that are known to expose the
13137                  * DMA bug without failing the test.
13138                  */
13139                 if (pci_dev_present(dma_wait_state_chipsets)) {
13140                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13141                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13142                 } else
13144                         /* Safe to use the calculated DMA boundary. */
13145                         tp->dma_rwctrl = saved_dma_rwctrl;
13146
13147                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13148         }
13149
13150 out:
13151         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13152 out_nofree:
13153         return ret;
13154 }
13155
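/* Establish the default link configuration: autonegotiation enabled with
 * all 10/100/1000 modes advertised, and no active or saved link state yet.
 */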
13156 static void __devinit tg3_init_link_config(struct tg3 *tp)
13157 {
13158         tp->link_config.advertising =
13159                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13160                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13161                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13162                  ADVERTISED_Autoneg | ADVERTISED_MII);
13163         tp->link_config.speed = SPEED_INVALID;
13164         tp->link_config.duplex = DUPLEX_INVALID;
13165         tp->link_config.autoneg = AUTONEG_ENABLE;
13166         tp->link_config.active_speed = SPEED_INVALID;
13167         tp->link_config.active_duplex = DUPLEX_INVALID;
13168         tp->link_config.phy_is_low_power = 0;
13169         tp->link_config.orig_speed = SPEED_INVALID;
13170         tp->link_config.orig_duplex = DUPLEX_INVALID;
13171         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13172 }
13173
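/* Set the default buffer manager watermarks.  5705 and newer chips use
 * the smaller 5705 MBUF pool defaults (with further overrides for the
 * 5906), older chips use the original values; jumbo-frame watermarks
 * are programmed in both cases.
 */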
13174 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13175 {
13176         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13177                 tp->bufmgr_config.mbuf_read_dma_low_water =
13178                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13179                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13180                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13181                 tp->bufmgr_config.mbuf_high_water =
13182                         DEFAULT_MB_HIGH_WATER_5705;
13183                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13184                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13185                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13186                         tp->bufmgr_config.mbuf_high_water =
13187                                 DEFAULT_MB_HIGH_WATER_5906;
13188                 }
13189
13190                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13191                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13192                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13193                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13194                 tp->bufmgr_config.mbuf_high_water_jumbo =
13195                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13196         } else {
13197                 tp->bufmgr_config.mbuf_read_dma_low_water =
13198                         DEFAULT_MB_RDMA_LOW_WATER;
13199                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13200                         DEFAULT_MB_MACRX_LOW_WATER;
13201                 tp->bufmgr_config.mbuf_high_water =
13202                         DEFAULT_MB_HIGH_WATER;
13203
13204                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13205                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13206                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13207                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13208                 tp->bufmgr_config.mbuf_high_water_jumbo =
13209                         DEFAULT_MB_HIGH_WATER_JUMBO;
13210         }
13211
13212         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13213         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13214 }
13215
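/* Map the PHY ID in tp->phy_id to a printable chip name for the
 * probe-time log message.
 */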
13216 static char * __devinit tg3_phy_string(struct tg3 *tp)
13217 {
13218         switch (tp->phy_id & PHY_ID_MASK) {
13219         case PHY_ID_BCM5400:    return "5400";
13220         case PHY_ID_BCM5401:    return "5401";
13221         case PHY_ID_BCM5411:    return "5411";
13222         case PHY_ID_BCM5701:    return "5701";
13223         case PHY_ID_BCM5703:    return "5703";
13224         case PHY_ID_BCM5704:    return "5704";
13225         case PHY_ID_BCM5705:    return "5705";
13226         case PHY_ID_BCM5750:    return "5750";
13227         case PHY_ID_BCM5752:    return "5752";
13228         case PHY_ID_BCM5714:    return "5714";
13229         case PHY_ID_BCM5780:    return "5780";
13230         case PHY_ID_BCM5755:    return "5755";
13231         case PHY_ID_BCM5787:    return "5787";
13232         case PHY_ID_BCM5784:    return "5784";
13233         case PHY_ID_BCM5756:    return "5722/5756";
13234         case PHY_ID_BCM5906:    return "5906";
13235         case PHY_ID_BCM5761:    return "5761";
13236         case PHY_ID_BCM8002:    return "8002/serdes";
13237         case 0:                 return "serdes";
13238         default:                return "unknown";
13239         }
13240 }
13241
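/* Format a human-readable description of the bus the device sits on into
 * @str and return it, e.g. "PCI Express" or "PCIX:133MHz:64-bit".  The
 * caller must supply a sufficiently large buffer (the probe path passes
 * a 40-byte buffer).
 */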
13242 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13243 {
13244         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13245                 strcpy(str, "PCI Express");
13246                 return str;
13247         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13248                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13249
13250                 strcpy(str, "PCIX:");
13251
13252                 if ((clock_ctrl == 7) ||
13253                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13254                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13255                         strcat(str, "133MHz");
13256                 else if (clock_ctrl == 0)
13257                         strcat(str, "33MHz");
13258                 else if (clock_ctrl == 2)
13259                         strcat(str, "50MHz");
13260                 else if (clock_ctrl == 4)
13261                         strcat(str, "66MHz");
13262                 else if (clock_ctrl == 6)
13263                         strcat(str, "100MHz");
13264         } else {
13265                 strcpy(str, "PCI:");
13266                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13267                         strcat(str, "66MHz");
13268                 else
13269                         strcat(str, "33MHz");
13270         }
13271         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13272                 strcat(str, ":32-bit");
13273         else
13274                 strcat(str, ":64-bit");
13275         return str;
13276 }
13277
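/* On dual-port 5704 boards the two ports are functions of the same PCI
 * device; scan the other functions in this slot to find the peer.  If
 * none is found (single-port configuration), the device is its own peer.
 */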
13278 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13279 {
13280         struct pci_dev *peer;
13281         unsigned int func, devnr = tp->pdev->devfn & ~7;
13282
13283         for (func = 0; func < 8; func++) {
13284                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13285                 if (peer && peer != tp->pdev)
13286                         break;
13287                 pci_dev_put(peer);
13288         }
13289         /* 5704 can be configured in single-port mode; set peer to
13290          * tp->pdev in that case.
13291          */
13292         if (!peer) {
13293                 peer = tp->pdev;
13294                 return peer;
13295         }
13296
13297         /*
13298          * We don't need to keep the refcount elevated; there's no way
13299          * to remove one half of this device without removing the other.
13300          */
13301         pci_dev_put(peer);
13302
13303         return peer;
13304 }
13305
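/* Fill in the default interrupt coalescing parameters reported through
 * ethtool.  Chips whose coalescing mode clears the tick counters on BD
 * events use the CLRTCKS defaults, and the per-IRQ and statistics-block
 * settings are zeroed on 5705 and newer chips.
 */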
13306 static void __devinit tg3_init_coal(struct tg3 *tp)
13307 {
13308         struct ethtool_coalesce *ec = &tp->coal;
13309
13310         memset(ec, 0, sizeof(*ec));
13311         ec->cmd = ETHTOOL_GCOALESCE;
13312         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13313         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13314         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13315         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13316         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13317         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13318         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13319         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13320         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13321
13322         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13323                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13324                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13325                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13326                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13327                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13328         }
13329
13330         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13331                 ec->rx_coalesce_usecs_irq = 0;
13332                 ec->tx_coalesce_usecs_irq = 0;
13333                 ec->stats_block_coalesce_usecs = 0;
13334         }
13335 }
13336
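/* Two net_device_ops tables are provided: the default one below, and a
 * variant whose ->ndo_start_xmit points at tg3_start_xmit_dma_bug for
 * chips that need the extra TX DMA workarounds (such as the 4GB boundary
 * bug) handled in that path.  tg3_init_one() picks one per ASIC revision.
 */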
13337 static const struct net_device_ops tg3_netdev_ops = {
13338         .ndo_open               = tg3_open,
13339         .ndo_stop               = tg3_close,
13340         .ndo_start_xmit         = tg3_start_xmit,
13341         .ndo_get_stats          = tg3_get_stats,
13342         .ndo_validate_addr      = eth_validate_addr,
13343         .ndo_set_multicast_list = tg3_set_rx_mode,
13344         .ndo_set_mac_address    = tg3_set_mac_addr,
13345         .ndo_do_ioctl           = tg3_ioctl,
13346         .ndo_tx_timeout         = tg3_tx_timeout,
13347         .ndo_change_mtu         = tg3_change_mtu,
13348 #if TG3_VLAN_TAG_USED
13349         .ndo_vlan_rx_register   = tg3_vlan_rx_register,
13350 #endif
13351 #ifdef CONFIG_NET_POLL_CONTROLLER
13352         .ndo_poll_controller    = tg3_poll_controller,
13353 #endif
13354 };
13355
13356 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
13357         .ndo_open               = tg3_open,
13358         .ndo_stop               = tg3_close,
13359         .ndo_start_xmit         = tg3_start_xmit_dma_bug,
13360         .ndo_get_stats          = tg3_get_stats,
13361         .ndo_validate_addr      = eth_validate_addr,
13362         .ndo_set_multicast_list = tg3_set_rx_mode,
13363         .ndo_set_mac_address    = tg3_set_mac_addr,
13364         .ndo_do_ioctl           = tg3_ioctl,
13365         .ndo_tx_timeout         = tg3_tx_timeout,
13366         .ndo_change_mtu         = tg3_change_mtu,
13367 #if TG3_VLAN_TAG_USED
13368         .ndo_vlan_rx_register   = tg3_vlan_rx_register,
13369 #endif
13370 #ifdef CONFIG_NET_POLL_CONTROLLER
13371         .ndo_poll_controller    = tg3_poll_controller,
13372 #endif
13373 };
13374
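/* PCI probe entry point: enable and map the device, read the chip
 * invariants, choose DMA masks, run the DMA self test, and finally
 * register the net device.  Any failure unwinds through the err_out_*
 * labels at the bottom.
 */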
13375 static int __devinit tg3_init_one(struct pci_dev *pdev,
13376                                   const struct pci_device_id *ent)
13377 {
13378         static int tg3_version_printed = 0;
13379         resource_size_t tg3reg_len;
13380         struct net_device *dev;
13381         struct tg3 *tp;
13382         int err, pm_cap;
13383         char str[40];
13384         u64 dma_mask, persist_dma_mask;
13385
13386         if (tg3_version_printed++ == 0)
13387                 printk(KERN_INFO "%s", version);
13388
13389         err = pci_enable_device(pdev);
13390         if (err) {
13391                 printk(KERN_ERR PFX "Cannot enable PCI device, "
13392                        "aborting.\n");
13393                 return err;
13394         }
13395
13396         if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
13397                 printk(KERN_ERR PFX "Cannot find proper PCI device "
13398                        "base address, aborting.\n");
13399                 err = -ENODEV;
13400                 goto err_out_disable_pdev;
13401         }
13402
13403         err = pci_request_regions(pdev, DRV_MODULE_NAME);
13404         if (err) {
13405                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13406                        "aborting.\n");
13407                 goto err_out_disable_pdev;
13408         }
13409
13410         pci_set_master(pdev);
13411
13412         /* Find power-management capability. */
13413         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13414         if (pm_cap == 0) {
13415                 printk(KERN_ERR PFX "Cannot find Power Management capability, "
13416                        "aborting.\n");
13417                 err = -EIO;
13418                 goto err_out_free_res;
13419         }
13420
13421         dev = alloc_etherdev(sizeof(*tp));
13422         if (!dev) {
13423                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13424                 err = -ENOMEM;
13425                 goto err_out_free_res;
13426         }
13427
13428         SET_NETDEV_DEV(dev, &pdev->dev);
13429
13430 #if TG3_VLAN_TAG_USED
13431         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13432 #endif
13433
13434         tp = netdev_priv(dev);
13435         tp->pdev = pdev;
13436         tp->dev = dev;
13437         tp->pm_cap = pm_cap;
13438         tp->rx_mode = TG3_DEF_RX_MODE;
13439         tp->tx_mode = TG3_DEF_TX_MODE;
13440
13441         if (tg3_debug > 0)
13442                 tp->msg_enable = tg3_debug;
13443         else
13444                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13445
13446         /* The word/byte swap controls here govern register access byte
13447          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
13448          * setting below.
13449          */
13450         tp->misc_host_ctrl =
13451                 MISC_HOST_CTRL_MASK_PCI_INT |
13452                 MISC_HOST_CTRL_WORD_SWAP |
13453                 MISC_HOST_CTRL_INDIR_ACCESS |
13454                 MISC_HOST_CTRL_PCISTATE_RW;
13455
13456         /* The NONFRM (non-frame) byte/word swap controls take effect
13457          * on descriptor entries, i.e. anything which isn't packet data.
13458          *
13459          * The StrongARM chips on the board (one for tx, one for rx)
13460          * are running in big-endian mode.
13461          */
13462         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13463                         GRC_MODE_WSWAP_NONFRM_DATA);
13464 #ifdef __BIG_ENDIAN
13465         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13466 #endif
13467         spin_lock_init(&tp->lock);
13468         spin_lock_init(&tp->indirect_lock);
13469         INIT_WORK(&tp->reset_task, tg3_reset_task);
13470
13471         dev->mem_start = pci_resource_start(pdev, BAR_0);
13472         tg3reg_len = pci_resource_len(pdev, BAR_0);
13473         dev->mem_end = dev->mem_start + tg3reg_len;
13474
13475         tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
13476         if (!tp->regs) {
13477                 printk(KERN_ERR PFX "Cannot map device registers, "
13478                        "aborting.\n");
13479                 err = -ENOMEM;
13480                 goto err_out_free_dev;
13481         }
13482
13483         tg3_init_link_config(tp);
13484
13485         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13486         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13487         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13488
13489         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13490         dev->ethtool_ops = &tg3_ethtool_ops;
13491         dev->watchdog_timeo = TG3_TX_TIMEOUT;
13492         dev->irq = pdev->irq;
13493
13494         err = tg3_get_invariants(tp);
13495         if (err) {
13496                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13497                        "aborting.\n");
13498                 goto err_out_iounmap;
13499         }
13500
13501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13502             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13503             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13504             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13505             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13506             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13507                 dev->netdev_ops = &tg3_netdev_ops;
13508         else
13509                 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
13510
13512         /* The EPB bridge inside 5714, 5715, and 5780 and any
13513          * device behind the EPB cannot support DMA addresses > 40-bit.
13514          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13515          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13516          * do DMA address check in tg3_start_xmit().
13517          */
13518         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13519                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13520         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13521                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13522 #ifdef CONFIG_HIGHMEM
13523                 dma_mask = DMA_64BIT_MASK;
13524 #endif
13525         } else
13526                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13527
13528         /* Configure DMA attributes. */
13529         if (dma_mask > DMA_32BIT_MASK) {
13530                 err = pci_set_dma_mask(pdev, dma_mask);
13531                 if (!err) {
13532                         dev->features |= NETIF_F_HIGHDMA;
13533                         err = pci_set_consistent_dma_mask(pdev,
13534                                                           persist_dma_mask);
13535                         if (err < 0) {
13536                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13537                                        "DMA for consistent allocations\n");
13538                                 goto err_out_iounmap;
13539                         }
13540                 }
13541         }
13542         if (err || dma_mask == DMA_32BIT_MASK) {
13543                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13544                 if (err) {
13545                         printk(KERN_ERR PFX "No usable DMA configuration, "
13546                                "aborting.\n");
13547                         goto err_out_iounmap;
13548                 }
13549         }
13550
13551         tg3_init_bufmgr_config(tp);
13552
13553         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13554                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13555         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13557             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13558             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13559             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13560             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13561                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13562         } else {
13563                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13564         }
13565
13566         /* TSO is on by default on chips that support hardware TSO.
13567          * Firmware TSO on older chips gives lower performance, so it
13568          * is off by default, but can be enabled using ethtool.
13569          */
13570         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13571                 dev->features |= NETIF_F_TSO;
13572                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13573                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13574                         dev->features |= NETIF_F_TSO6;
13575                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13576                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13577                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13578                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13579                         dev->features |= NETIF_F_TSO_ECN;
13580         }
13581
13583         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13584             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13585             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13586                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13587                 tp->rx_pending = 63;
13588         }
13589
13590         err = tg3_get_device_address(tp);
13591         if (err) {
13592                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13593                        "aborting.\n");
13594                 goto err_out_iounmap;
13595         }
13596
13597         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13598                 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
13599                         printk(KERN_ERR PFX "Cannot find proper PCI device "
13600                                "base address for APE, aborting.\n");
13601                         err = -ENODEV;
13602                         goto err_out_iounmap;
13603                 }
13604
13605                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13606                 if (!tp->aperegs) {
13607                         printk(KERN_ERR PFX "Cannot map APE registers, "
13608                                "aborting.\n");
13609                         err = -ENOMEM;
13610                         goto err_out_iounmap;
13611                 }
13612
13613                 tg3_ape_lock_init(tp);
13614         }
13615
13616         /*
13617          * Reset the chip in case a UNDI or EFI driver did not shut down
13618          * DMA.  The DMA self test will enable WDMAC and we'll see
13619          * (spurious) pending DMA on the PCI bus at that point.
13620          */
13621         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13622             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13623                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13624                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13625         }
13626
13627         err = tg3_test_dma(tp);
13628         if (err) {
13629                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13630                 goto err_out_apeunmap;
13631         }
13632
13633         /* Tigon3 can do IPv4 checksum offload only (newer chips also
13634          * handle IPv6), and some chips have buggy checksumming.
13635          */
13636         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13637                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13638                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13639                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13640                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13641                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13642                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13643                         dev->features |= NETIF_F_IPV6_CSUM;
13644
13645                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13646         } else
13647                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13648
13649         /* flow control autonegotiation is default behavior */
13650         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13651         tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
13652
13653         tg3_init_coal(tp);
13654
13655         pci_set_drvdata(pdev, dev);
13656
13657         err = register_netdev(dev);
13658         if (err) {
13659                 printk(KERN_ERR PFX "Cannot register net device, "
13660                        "aborting.\n");
13661                 goto err_out_apeunmap;
13662         }
13663
13664         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
13665                dev->name,
13666                tp->board_part_number,
13667                tp->pci_chip_rev_id,
13668                tg3_bus_string(tp, str),
13669                dev->dev_addr);
13670
13671         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13672                 printk(KERN_INFO
13673                        "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13674                        tp->dev->name,
13675                        tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13676                        dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
13677         else
13678                 printk(KERN_INFO
13679                        "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13680                        tp->dev->name, tg3_phy_string(tp),
13681                        ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13682                         ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13683                          "10/100/1000Base-T")),
13684                        (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13685
13686         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
13687                dev->name,
13688                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13689                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13690                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13691                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13692                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13693         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13694                dev->name, tp->dma_rwctrl,
13695                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13696                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13697
13698         return 0;
13699
13700 err_out_apeunmap:
13701         if (tp->aperegs) {
13702                 iounmap(tp->aperegs);
13703                 tp->aperegs = NULL;
13704         }
13705
13706 err_out_iounmap:
13707         if (tp->regs) {
13708                 iounmap(tp->regs);
13709                 tp->regs = NULL;
13710         }
13711
13712 err_out_free_dev:
13713         free_netdev(dev);
13714
13715 err_out_free_res:
13716         pci_release_regions(pdev);
13717
13718 err_out_disable_pdev:
13719         pci_disable_device(pdev);
13720         pci_set_drvdata(pdev, NULL);
13721         return err;
13722 }
13723
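/* PCI remove entry point: undo everything tg3_init_one() set up, after
 * making sure no reset task is still pending.
 */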
13724 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13725 {
13726         struct net_device *dev = pci_get_drvdata(pdev);
13727
13728         if (dev) {
13729                 struct tg3 *tp = netdev_priv(dev);
13730
13731                 flush_scheduled_work();
13732
13733                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13734                         tg3_phy_fini(tp);
13735                         tg3_mdio_fini(tp);
13736                 }
13737
13738                 unregister_netdev(dev);
13739                 if (tp->aperegs) {
13740                         iounmap(tp->aperegs);
13741                         tp->aperegs = NULL;
13742                 }
13743                 if (tp->regs) {
13744                         iounmap(tp->regs);
13745                         tp->regs = NULL;
13746                 }
13747                 free_netdev(dev);
13748                 pci_release_regions(pdev);
13749                 pci_disable_device(pdev);
13750                 pci_set_drvdata(pdev, NULL);
13751         }
13752 }
13753
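/* Power management suspend: stop the PHY, NAPI, timer and interrupts,
 * halt the chip and drop it into the target low-power state.  If the
 * power transition fails, the interface is restarted so it keeps working.
 */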
13754 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13755 {
13756         struct net_device *dev = pci_get_drvdata(pdev);
13757         struct tg3 *tp = netdev_priv(dev);
13758         pci_power_t target_state;
13759         int err;
13760
13761         /* PCI register 4 needs to be saved whether netif_running() or not.
13762          * MSI address and data need to be saved if using MSI and
13763          * netif_running().
13764          */
13765         pci_save_state(pdev);
13766
13767         if (!netif_running(dev))
13768                 return 0;
13769
13770         flush_scheduled_work();
13771         tg3_phy_stop(tp);
13772         tg3_netif_stop(tp);
13773
13774         del_timer_sync(&tp->timer);
13775
13776         tg3_full_lock(tp, 1);
13777         tg3_disable_ints(tp);
13778         tg3_full_unlock(tp);
13779
13780         netif_device_detach(dev);
13781
13782         tg3_full_lock(tp, 0);
13783         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13784         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13785         tg3_full_unlock(tp);
13786
13787         target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13788
13789         err = tg3_set_power_state(tp, target_state);
13790         if (err) {
13791                 int err2;
13792
13793                 tg3_full_lock(tp, 0);
13794
13795                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13796                 err2 = tg3_restart_hw(tp, 1);
13797                 if (err2)
13798                         goto out;
13799
13800                 tp->timer.expires = jiffies + tp->timer_offset;
13801                 add_timer(&tp->timer);
13802
13803                 netif_device_attach(dev);
13804                 tg3_netif_start(tp);
13805
13806 out:
13807                 tg3_full_unlock(tp);
13808
13809                 if (!err2)
13810                         tg3_phy_start(tp);
13811         }
13812
13813         return err;
13814 }
13815
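/* Power management resume: restore PCI config space, bring the chip back
 * to D0, and restart the hardware, timer and PHY if the interface was
 * running at suspend time.
 */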
13816 static int tg3_resume(struct pci_dev *pdev)
13817 {
13818         struct net_device *dev = pci_get_drvdata(pdev);
13819         struct tg3 *tp = netdev_priv(dev);
13820         int err;
13821
13822         pci_restore_state(tp->pdev);
13823
13824         if (!netif_running(dev))
13825                 return 0;
13826
13827         err = tg3_set_power_state(tp, PCI_D0);
13828         if (err)
13829                 return err;
13830
13831         netif_device_attach(dev);
13832
13833         tg3_full_lock(tp, 0);
13834
13835         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13836         err = tg3_restart_hw(tp, 1);
13837         if (err)
13838                 goto out;
13839
13840         tp->timer.expires = jiffies + tp->timer_offset;
13841         add_timer(&tp->timer);
13842
13843         tg3_netif_start(tp);
13844
13845 out:
13846         tg3_full_unlock(tp);
13847
13848         if (!err)
13849                 tg3_phy_start(tp);
13850
13851         return err;
13852 }
13853
13854 static struct pci_driver tg3_driver = {
13855         .name           = DRV_MODULE_NAME,
13856         .id_table       = tg3_pci_tbl,
13857         .probe          = tg3_init_one,
13858         .remove         = __devexit_p(tg3_remove_one),
13859         .suspend        = tg3_suspend,
13860         .resume         = tg3_resume
13861 };
13862
13863 static int __init tg3_init(void)
13864 {
13865         return pci_register_driver(&tg3_driver);
13866 }
13867
13868 static void __exit tg3_cleanup(void)
13869 {
13870         pci_unregister_driver(&tg3_driver);
13871 }
13872
13873 module_init(tg3_init);
13874 module_exit(tg3_cleanup);