/*
 * Source: net-next-2.6.git — drivers/net/tg3.c
 * Commit context: "[TG3]: Add tg3_restart_hw()"
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.62"
72 #define DRV_MODULE_RELDATE      "June 30, 2006"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place this n-ring entries value into the tp struct itself,
107  * we really want to expose these constants to GCC so that modulo et
108  * al.  operations are done with shifts and masks instead of with
109  * hw multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         ((TP)->tx_pending -                                             \
128          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
129 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
130
131 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
132 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
133
134 /* minimum number of free TX descriptors required to wake up TX process */
135 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
136
137 /* number of ETHTOOL_GSTATS u64's */
138 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
139
140 #define TG3_NUM_TEST            6
141
142 static char version[] __devinitdata =
143         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
144
145 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
146 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
147 MODULE_LICENSE("GPL");
148 MODULE_VERSION(DRV_MODULE_VERSION);
149
150 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
151 module_param(tg3_debug, int, 0);
152 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
153
154 static struct pci_device_id tg3_pci_tbl[] = {
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
232           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
234           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
235         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
236           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
237         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
238           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
240           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
242           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
243         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
244           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
245         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
246           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
247         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
248           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
249         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
250           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
251         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
252           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
253         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
254           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
255         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
256           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
257         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
258           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
259         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
260           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
261         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
262           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
263         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
264           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
265         { 0, }
266 };
267
268 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
269
/* String names exported via ETHTOOL_GSTRINGS for the ETHTOOL_GSTATS
 * statistics.  The entry count (TG3_NUM_STATS) is derived from
 * struct tg3_ethtool_stats (one u64 per entry), so this table is
 * expected to stay in sync with that structure's layout — verify
 * against tg3.h before adding/reordering entries.
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
350
/* Names of the TG3_NUM_TEST self-tests reported via ETHTOOL_GSTRINGS;
 * the "(online)"/"(offline)" suffix in each label indicates the test
 * category shown to the user.
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
361
/* Write a 32-bit chip register via direct MMIO (posted write, no flush). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
366
367 static u32 tg3_read32(struct tg3 *tp, u32 off)
368 {
369         return (readl(tp->regs + off)); 
370 }
371
/* Write a chip register indirectly through the PCI config-space
 * register window.  The window base/data pair is shared state, so
 * indirect_lock serializes the two config writes.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
381
/* Write a chip register and immediately read it back so the posted
 * PCI write is flushed to the device before we return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
387
/* Read a chip register indirectly through the PCI config-space
 * register window, under indirect_lock (see tg3_write_indirect_reg32).
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
399
/* Write a mailbox register through PCI config space (indirect mode).
 *
 * The RX return-ring consumer and standard-ring producer mailboxes
 * have dedicated config-space shadow registers and bypass the shared
 * register window entirely (no lock needed).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	/* Mailbox registers sit 0x5600 above the mailbox offset in the
	 * indirect register window.
	 */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
429
/* Read a mailbox register through the PCI config-space window; the
 * 0x5600 offset maps the mailbox space into the register window
 * (see tg3_write_indirect_mbox).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
441
442 /* usec_wait specifies the wait time in usec when writing to certain registers
443  * where it is unsafe to read back the register without some delay.
444  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
445  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
446  */
447 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
448 {
449         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
450             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
451                 /* Non-posted methods */
452                 tp->write32(tp, off, val);
453         else {
454                 /* Posted method */
455                 tg3_write32(tp, off, val);
456                 if (usec_wait)
457                         udelay(usec_wait);
458                 tp->read32(tp, off);
459         }
460         /* Wait again after the read for the posted method to guarantee that
461          * the wait time is met.
462          */
463         if (usec_wait)
464                 udelay(usec_wait);
465 }
466
/* Write a mailbox register and read it back to flush the posted
 * write.  The read-back is skipped on chips flagged for mailbox write
 * reordering or the ICH workaround.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
474
/* Write a TX mailbox via direct MMIO.  On chips flagged with the TXD
 * mailbox hardware bug the value is written twice; on chips that may
 * reorder mailbox writes a read-back flush is issued.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
484
485 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
486 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
487 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
488 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
489 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
490
491 #define tw32(reg,val)           tp->write32(tp, reg, val)
492 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
493 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
494 #define tr32(reg)               tp->read32(tp, reg)
495
/* Write a word into NIC on-board SRAM at offset @off through the
 * memory window.  PCI config-space accesses are used when
 * TG3_FLAG_SRAM_USE_CONFIG is set, flushed MMIO otherwise; the window
 * base is restored to zero afterwards in both paths.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
516
/* Read a word of NIC on-board SRAM at offset @off into *val through
 * the memory window; mirror image of tg3_write_mem().
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
537
/* Mask chip interrupts: set the PCI INT mask bit in misc host control
 * and write 1 to interrupt mailbox 0 (with flush).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
544
545 static inline void tg3_cond_int(struct tg3 *tp)
546 {
547         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
548             (tp->hw_status->status & SD_STATUS_UPDATED))
549                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
550 }
551
/* Unmask chip interrupts.  irq_sync is cleared first (with a write
 * barrier) so other CPUs see the unmask ordering; the interrupt
 * mailbox is acked with the last seen status tag, repeated once for
 * 1-shot MSI chips, and a pending-work interrupt is forced if needed.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();		/* publish irq_sync = 0 before unmasking */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
566
567 static inline unsigned int tg3_has_work(struct tg3 *tp)
568 {
569         struct tg3_hw_status *sblk = tp->hw_status;
570         unsigned int work_exists = 0;
571
572         /* check for phy events */
573         if (!(tp->tg3_flags &
574               (TG3_FLAG_USE_LINKCHG_REG |
575                TG3_FLAG_POLL_SERDES))) {
576                 if (sblk->status & SD_STATUS_LINK_CHG)
577                         work_exists = 1;
578         }
579         /* check for RX/TX work to do */
580         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
581             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
582                 work_exists = 1;
583
584         return work_exists;
585 }
586
587 /* tg3_restart_ints
588  *  similar to tg3_enable_ints, but it accurately determines whether there
589  *  is new work pending and can return without flushing the PIO write
590  *  which reenables interrupts 
591  */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Ack up to the last processed status tag; no flush needed here. */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* order the mailbox write before any later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
607
/* Quiesce the stack side of the device: refresh trans_start so the TX
 * watchdog doesn't fire while TX is intentionally disabled, then stop
 * polling and the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
614
/* Resume the stack side of the device: wake the TX queue, re-enable
 * polling, mark the status block updated and re-enable interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	/* Force tg3_cond_int() inside tg3_enable_ints() to fire if needed. */
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
626
/* Switch the chip core clock to its normal configuration.  No-op on
 * 5780-class chips.  Only the CLKRUN bits and low 5 bits of
 * TG3PCI_CLOCK_CTRL are preserved (cached in tp->pci_clock_ctrl); the
 * intermediate ALTCLK writes step the clock down when the chip was
 * running with the 625MHz (5705+) or 44MHz core clock selected.
 * tw32_wait_f() is used because clock changes need a settle delay
 * before the register may safely be read back.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
657
658 #define PHY_BUSY_LOOPS  5000
659
/* Read PHY register @reg over the MDIO (MI) interface into *val.
 *
 * Hardware autopolling of the PHY is temporarily disabled so MI_COM
 * is not contended, and restored on exit.  Returns 0 on success,
 * -EBUSY if the MI transaction stays busy past PHY_BUSY_LOOPS polls.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle so the data
			 * field is stable.
			 */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
708
/* Write @val to PHY register @reg over the MDIO (MI) interface.
 *
 * Mirror image of tg3_readphy(): autopolling is paused around the
 * transaction.  Returns 0 on success, -EBUSY if MI_COM stays busy
 * past PHY_BUSY_LOOPS polls.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
753
754 static void tg3_phy_set_wirespeed(struct tg3 *tp)
755 {
756         u32 val;
757
758         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
759                 return;
760
761         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
762             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
763                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
764                              (val | (1 << 15) | (1 << 4)));
765 }
766
767 static int tg3_bmcr_reset(struct tg3 *tp)
768 {
769         u32 phy_control;
770         int limit, err;
771
772         /* OK, reset it, and poll the BMCR_RESET bit until it
773          * clears or we time out.
774          */
775         phy_control = BMCR_RESET;
776         err = tg3_writephy(tp, MII_BMCR, phy_control);
777         if (err != 0)
778                 return -EBUSY;
779
780         limit = 5000;
781         while (limit--) {
782                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
783                 if (err != 0)
784                         return -EBUSY;
785
786                 if ((phy_control & BMCR_RESET) == 0) {
787                         udelay(40);
788                         break;
789                 }
790                 udelay(10);
791         }
792         if (limit <= 0)
793                 return -EBUSY;
794
795         return 0;
796 }
797
798 static int tg3_wait_macro_done(struct tg3 *tp)
799 {
800         int limit = 100;
801
802         while (limit--) {
803                 u32 tmp32;
804
805                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
806                         if ((tmp32 & 0x1000) == 0)
807                                 break;
808                 }
809         }
810         if (limit <= 0)
811                 return -EBUSY;
812
813         return 0;
814 }
815
/* Write a fixed test pattern into the DSP of each of the four PHY
 * channels, kick it through the macro register (0x16), read it back
 * and verify it survived.  On any macro timeout or access failure
 * *resetp is set so the caller's retry loop performs a PHY reset
 * before trying again.
 *
 * Returns 0 when all four channels verify clean, -EBUSY otherwise.
 * The pattern values are vendor-supplied and not otherwise documented.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block (blocks are 0x2000 apart)
		 * and arm the macro for a pattern write (0x0002).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		/* Load the six pattern words. */
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the write (0x0202) and wait for completion. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start the read-back macro
		 * (0x0082 then 0x0802 — presumably arm + trigger; these
		 * opcodes come from Broadcom and are undocumented).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read the pattern back as low/high word pairs and
		 * compare against what was written.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			/* Only 15 bits of the low word and 4 bits of the
			 * high word are significant for the compare.
			 */
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke DSP reg 0x000b with the
				 * vendor-specified recovery values, then
				 * report failure WITHOUT setting *resetp.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
881
882 static int tg3_phy_reset_chanpat(struct tg3 *tp)
883 {
884         int chan;
885
886         for (chan = 0; chan < 4; chan++) {
887                 int i;
888
889                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
890                              (chan * 0x2000) | 0x0200);
891                 tg3_writephy(tp, 0x16, 0x0002);
892                 for (i = 0; i < 6; i++)
893                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
894                 tg3_writephy(tp, 0x16, 0x0202);
895                 if (tg3_wait_macro_done(tp))
896                         return -EBUSY;
897         }
898
899         return 0;
900 }
901
902 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
903 {
904         u32 reg32, phy9_orig;
905         int retries, do_phy_reset, err;
906
907         retries = 10;
908         do_phy_reset = 1;
909         do {
910                 if (do_phy_reset) {
911                         err = tg3_bmcr_reset(tp);
912                         if (err)
913                                 return err;
914                         do_phy_reset = 0;
915                 }
916
917                 /* Disable transmitter and interrupt.  */
918                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
919                         continue;
920
921                 reg32 |= 0x3000;
922                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
923
924                 /* Set full-duplex, 1000 mbps.  */
925                 tg3_writephy(tp, MII_BMCR,
926                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
927
928                 /* Set to master mode.  */
929                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
930                         continue;
931
932                 tg3_writephy(tp, MII_TG3_CTRL,
933                              (MII_TG3_CTRL_AS_MASTER |
934                               MII_TG3_CTRL_ENABLE_AS_MASTER));
935
936                 /* Enable SM_DSP_CLOCK and 6dB.  */
937                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
938
939                 /* Block the PHY control access.  */
940                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
941                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
942
943                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
944                 if (!err)
945                         break;
946         } while (--retries);
947
948         err = tg3_phy_reset_chanpat(tp);
949         if (err)
950                 return err;
951
952         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
953         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
954
955         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
956         tg3_writephy(tp, 0x16, 0x0000);
957
958         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
959             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
960                 /* Set Extended packet length bit for jumbo frames */
961                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
962         }
963         else {
964                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
965         }
966
967         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
968
969         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
970                 reg32 &= ~0x3000;
971                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
972         } else if (!err)
973                 err = -EBUSY;
974
975         return err;
976 }
977
978 static void tg3_link_report(struct tg3 *);
979
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Returns 0 on success, -EBUSY if the PHY cannot be read, or the
 * error from the chip-specific reset helper.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* Read BMSR twice: link status is latched-low, so the first
	 * read returns the latched value and the second the live one.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* Report the link going down before the reset drops it. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 require the DSP test-pattern workaround
	 * instead of a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Post-reset PHY bug workarounds.  Each DSP address/data write
	 * pair below programs a vendor-specific register; the values
	 * come from Broadcom and are not otherwise documented.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice on purpose — presumably required by the
		 * 5704 A0 erratum; confirm against the errata sheet.
		 */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1068
/* Configure the GPIO lines that control the auxiliary (Vaux) power
 * source.  On dual-port devices (5704/5714) the GPIOs are shared with
 * the peer port, so the peer's WOL/ASF state is consulted before they
 * are touched.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Write-protected EEPROM devices do not manage aux power here. */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Aux power must stay up if either port needs WOL or ASF. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			/* 5700/5701: single write enables the GPIOs. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer port already initialized the shared
			 * GPIOs, leave them alone.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three staged writes: presumably a required GPIO
			 * toggle sequence for the aux power switch — the
			 * 100us settle time between each is deliberate.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			/* Again, defer to the peer if it owns the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1 to switch aux power off. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1164
1165 static int tg3_setup_phy(struct tg3 *, int);
1166
1167 #define RESET_KIND_SHUTDOWN     0
1168 #define RESET_KIND_INIT         1
1169 #define RESET_KIND_SUSPEND      2
1170
1171 static void tg3_write_sig_post_reset(struct tg3 *, int);
1172 static int tg3_halt_cpu(struct tg3 *, u32);
1173 static int tg3_nvram_lock(struct tg3 *);
1174 static void tg3_nvram_unlock(struct tg3 *);
1175
1176 static void tg3_power_down_phy(struct tg3 *tp)
1177 {
1178         /* The PHY should not be powered down on some chips because
1179          * of bugs.
1180          */
1181         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1182             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1183             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1184              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1185                 return;
1186         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1187 }
1188
/* Transition the device into the requested PCI power state, setting
 * up WOL, clock gating and PHY power on the way down.
 *
 * Returns 0 on success, -EINVAL for an unsupported target state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* offset of the PCI PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Clear pending PME status (write-one-to-clear) and the
	 * current power-state field before encoding the new state.
	 */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		/* D0 encodes as 0 in the state field — nothing to add. */
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	/* Everything below is the shutdown path (D1/D2/D3hot). */
	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the device is being put to sleep. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings so they can be restored when
	 * coming back to D0.
	 */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Drop copper links to 10/half for minimum power draw. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	/* Without ASF, wait (up to ~200ms) for the firmware mailbox to
	 * report ready before posting the WOL signature.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
					     WOL_DRV_STATE_SHUTDOWN |
					     WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	/* Configure the MAC to receive magic packets while asleep. */
	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate unneeded clocks; the exact bits depend on chip family. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two staged writes with settle time between them. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		/* Turn off the PHY */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
			tg3_power_down_phy(tp);
		}
	}

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* NOTE(review): the RX CPU is halted even when the
			 * NVRAM lock failed; only the unlock is skipped.
			 * Looks intentional but worth confirming.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1399
1400 static void tg3_link_report(struct tg3 *tp)
1401 {
1402         if (!netif_carrier_ok(tp->dev)) {
1403                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1404         } else {
1405                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1406                        tp->dev->name,
1407                        (tp->link_config.active_speed == SPEED_1000 ?
1408                         1000 :
1409                         (tp->link_config.active_speed == SPEED_100 ?
1410                          100 : 10)),
1411                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1412                         "full" : "half"));
1413
1414                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1415                        "%s for RX.\n",
1416                        tp->dev->name,
1417                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1418                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1419         }
1420 }
1421
1422 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1423 {
1424         u32 new_tg3_flags = 0;
1425         u32 old_rx_mode = tp->rx_mode;
1426         u32 old_tx_mode = tp->tx_mode;
1427
1428         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1429
1430                 /* Convert 1000BaseX flow control bits to 1000BaseT
1431                  * bits before resolving flow control.
1432                  */
1433                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1434                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1435                                        ADVERTISE_PAUSE_ASYM);
1436                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1437
1438                         if (local_adv & ADVERTISE_1000XPAUSE)
1439                                 local_adv |= ADVERTISE_PAUSE_CAP;
1440                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1441                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1442                         if (remote_adv & LPA_1000XPAUSE)
1443                                 remote_adv |= LPA_PAUSE_CAP;
1444                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1445                                 remote_adv |= LPA_PAUSE_ASYM;
1446                 }
1447
1448                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1449                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1450                                 if (remote_adv & LPA_PAUSE_CAP)
1451                                         new_tg3_flags |=
1452                                                 (TG3_FLAG_RX_PAUSE |
1453                                                 TG3_FLAG_TX_PAUSE);
1454                                 else if (remote_adv & LPA_PAUSE_ASYM)
1455                                         new_tg3_flags |=
1456                                                 (TG3_FLAG_RX_PAUSE);
1457                         } else {
1458                                 if (remote_adv & LPA_PAUSE_CAP)
1459                                         new_tg3_flags |=
1460                                                 (TG3_FLAG_RX_PAUSE |
1461                                                 TG3_FLAG_TX_PAUSE);
1462                         }
1463                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1464                         if ((remote_adv & LPA_PAUSE_CAP) &&
1465                         (remote_adv & LPA_PAUSE_ASYM))
1466                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1467                 }
1468
1469                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1470                 tp->tg3_flags |= new_tg3_flags;
1471         } else {
1472                 new_tg3_flags = tp->tg3_flags;
1473         }
1474
1475         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1476                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1477         else
1478                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1479
1480         if (old_rx_mode != tp->rx_mode) {
1481                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1482         }
1483         
1484         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1485                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1486         else
1487                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1488
1489         if (old_tx_mode != tp->tx_mode) {
1490                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1491         }
1492 }
1493
1494 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1495 {
1496         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1497         case MII_TG3_AUX_STAT_10HALF:
1498                 *speed = SPEED_10;
1499                 *duplex = DUPLEX_HALF;
1500                 break;
1501
1502         case MII_TG3_AUX_STAT_10FULL:
1503                 *speed = SPEED_10;
1504                 *duplex = DUPLEX_FULL;
1505                 break;
1506
1507         case MII_TG3_AUX_STAT_100HALF:
1508                 *speed = SPEED_100;
1509                 *duplex = DUPLEX_HALF;
1510                 break;
1511
1512         case MII_TG3_AUX_STAT_100FULL:
1513                 *speed = SPEED_100;
1514                 *duplex = DUPLEX_FULL;
1515                 break;
1516
1517         case MII_TG3_AUX_STAT_1000HALF:
1518                 *speed = SPEED_1000;
1519                 *duplex = DUPLEX_HALF;
1520                 break;
1521
1522         case MII_TG3_AUX_STAT_1000FULL:
1523                 *speed = SPEED_1000;
1524                 *duplex = DUPLEX_FULL;
1525                 break;
1526
1527         default:
1528                 *speed = SPEED_INVALID;
1529                 *duplex = DUPLEX_INVALID;
1530                 break;
1531         };
1532 }
1533
/* Program the copper PHY advertisement registers from tp->link_config
 * and then either (re)start autonegotiation or force the link mode.
 *
 * Three advertisement cases are handled:
 *   1. phy_is_low_power: advertise only 10Mb modes (plus 100Mb when
 *      TG3_FLAG_WOL_SPEED_100MB is set) and clear the gigabit control
 *      register.
 *   2. speed == SPEED_INVALID: advertise every mode the device
 *      supports, with gigabit masked off for 10/100-only parts.
 *   3. otherwise: advertise exactly the single requested speed/duplex.
 *
 * Finally, if autoneg is disabled and a specific speed was requested,
 * BMCR is forced to that mode; otherwise autonegotiation is restarted.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
        u32 new_adv;
        int i;

        if (tp->link_config.phy_is_low_power) {
                /* Entering low power mode.  Disable gigabit and
                 * 100baseT advertisements.
                 */
                tg3_writephy(tp, MII_TG3_CTRL, 0);

                new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                /* Keep 100Mb advertised if wake-on-LAN needs it. */
                if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
                        new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

                tg3_writephy(tp, MII_ADVERTISE, new_adv);
        } else if (tp->link_config.speed == SPEED_INVALID) {
                /* No specific mode requested: advertise everything. */
                tp->link_config.advertising =
                        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
                         ADVERTISED_Autoneg | ADVERTISED_MII);

                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
                                  ADVERTISED_1000baseT_Full);

                /* Translate the generic ADVERTISED_* mask into the
                 * MII_ADVERTISE register bits for 10/100 modes.
                 */
                new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
                        new_adv |= ADVERTISE_10HALF;
                if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
                        new_adv |= ADVERTISE_10FULL;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
                        new_adv |= ADVERTISE_100HALF;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
                        new_adv |= ADVERTISE_100FULL;
                tg3_writephy(tp, MII_ADVERTISE, new_adv);

                /* Gigabit modes go in the separate 1000BASE-T control
                 * register.
                 */
                if (tp->link_config.advertising &
                    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
                        new_adv = 0;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                                new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                                new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
                        /* 5701 A0/B0 chips: also force master mode
                         * (chip-specific workaround).
                         */
                        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
                            (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                        tg3_writephy(tp, MII_TG3_CTRL, new_adv);
                } else {
                        tg3_writephy(tp, MII_TG3_CTRL, 0);
                }
        } else {
                /* Asking for a specific link mode. */
                if (tp->link_config.speed == SPEED_1000) {
                        new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);

                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = MII_TG3_CTRL_ADV_1000_FULL;
                        else
                                new_adv = MII_TG3_CTRL_ADV_1000_HALF;
                        /* 5701 A0/B0 master-mode workaround, as above. */
                        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                        tg3_writephy(tp, MII_TG3_CTRL, new_adv);
                } else {
                        /* 10/100 only: clear gigabit advertisement. */
                        tg3_writephy(tp, MII_TG3_CTRL, 0);

                        new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
                        if (tp->link_config.speed == SPEED_100) {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_100FULL;
                                else
                                        new_adv |= ADVERTISE_100HALF;
                        } else {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_10FULL;
                                else
                                        new_adv |= ADVERTISE_10HALF;
                        }
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);
                }
        }

        if (tp->link_config.autoneg == AUTONEG_DISABLE &&
            tp->link_config.speed != SPEED_INVALID) {
                u32 bmcr, orig_bmcr;

                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;

                /* Build the forced-mode BMCR value. */
                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
                case SPEED_10:
                        break;

                case SPEED_100:
                        bmcr |= BMCR_SPEED100;
                        break;

                case SPEED_1000:
                        bmcr |= TG3_BMCR_SPEED1000;
                        break;
                };

                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;

                if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
                    (bmcr != orig_bmcr)) {
                        /* Briefly put the PHY in loopback and wait for
                         * the link to drop before writing the new mode.
                         */
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;

                                udelay(10);
                                /* Read BMSR twice; it is latched. */
                                if (tg3_readphy(tp, MII_BMSR, &tmp) ||
                                    tg3_readphy(tp, MII_BMSR, &tmp))
                                        continue;
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }
                        tg3_writephy(tp, MII_BMCR, bmcr);
                        udelay(40);
                }
        } else {
                /* Autoneg path: (re)start negotiation. */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        }
}
1672
1673 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1674 {
1675         int err;
1676
1677         /* Turn off tap power management. */
1678         /* Set Extended packet length bit */
1679         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1680
1681         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1682         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1683
1684         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1685         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1686
1687         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1688         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1689
1690         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1691         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1692
1693         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1694         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1695
1696         udelay(40);
1697
1698         return err;
1699 }
1700
1701 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1702 {
1703         u32 adv_reg, all_mask;
1704
1705         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1706                 return 0;
1707
1708         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1709                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1710         if ((adv_reg & all_mask) != all_mask)
1711                 return 0;
1712         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1713                 u32 tg3_ctrl;
1714
1715                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1716                         return 0;
1717
1718                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1719                             MII_TG3_CTRL_ADV_1000_FULL);
1720                 if ((tg3_ctrl & all_mask) != all_mask)
1721                         return 0;
1722         }
1723         return 1;
1724 }
1725
/* Bring up (or re-verify) the link on a copper PHY and program the
 * MAC to match the negotiated mode.  Updates netif carrier state and
 * reports link changes.
 *
 * @tp: device state
 * @force_reset: nonzero to unconditionally reset the PHY first
 *
 * Returns 0, or a phy-write error code if BCM5401 DSP init fails.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, dummy;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        /* Quiesce MAC events and clear stale link status bits. */
        tw32(MAC_EVENT, 0);

        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        tp->mi_mode = MAC_MI_MODE_BASE;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                /* BMSR is latched; read twice for the current value. */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
                        bmsr = 0;

                if (!(bmsr & BMSR_LSTATUS)) {
                        /* Link down on a 5401: reload the DSP and
                         * wait up to ~10ms for link to come back.
                         */
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        /* 5401 B0 at gigabit with no link: reset the
                         * PHY and redo DSP init once more.
                         */
                        if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, 0x1c, 0x8c68);
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8c68);
        }

        /* Clear pending interrupts... */
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

        /* Unmask only the link-change interrupt when MI interrupts
         * are in use; otherwise mask everything.
         */
        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
                u32 val;

                /* Ensure bit 10 of the aux control register is set;
                 * if it was clear, set it and redo link setup.
                 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
                tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
                if (!(val & (1 << 10))) {
                        val |= (1 << 10);
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
                        goto relink;
                }
        }

        /* Poll for link up (latched BMSR, double read). */
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                /* Wait for a nonzero aux status, then decode the
                 * negotiated speed/duplex from it.
                 */
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                /* Read BMCR until it returns a stable, sane value. */
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if (bmcr & BMCR_ANENABLE) {
                                current_link_up = 1;

                                /* Force autoneg restart if we are exiting
                                 * low power mode.
                                 */
                                if (!tg3_copper_is_advertising_all(tp))
                                        current_link_up = 0;
                        } else {
                                current_link_up = 0;
                        }
                } else {
                        /* Forced mode: link only counts if the PHY is
                         * in exactly the requested speed/duplex.
                         */
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex) {
                                current_link_up = 1;
                        } else {
                                current_link_up = 0;
                        }
                }

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;
        }

        /* With autoneg full-duplex link, resolve flow control from
         * the local and link-partner pause advertisements.
         */
        if (current_link_up == 1 &&
            (tp->link_config.active_duplex == DUPLEX_FULL) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 local_adv, remote_adv;

                if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
                        local_adv = 0;
                local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

                if (tg3_readphy(tp, MII_LPA, &remote_adv))
                        remote_adv = 0;

                remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

                /* If we are not advertising full pause capability,
                 * something is wrong.  Bring the link down and reconfigure.
                 */
                if (local_adv != ADVERTISE_PAUSE_CAP) {
                        current_link_up = 0;
                } else {
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                }
        }
relink:
        if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
                u32 tmp;

                /* Reprogram advertisements / restart negotiation,
                 * then re-check link state.
                 */
                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &tmp);
                if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
                    (tmp & BMSR_LSTATUS))
                        current_link_up = 1;
        }

        /* Program the MAC port mode to match the link speed. */
        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        /* Link polarity: 5700 keys off LED mode / 10Mb link, other
         * chips set it whenever link is up.
         */
        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
                    (current_link_up == 1 &&
                     tp->link_config.active_speed == SPEED_10))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
        } else {
                if (current_link_up == 1)
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        /* 5700 at gigabit on PCI-X / high-speed PCI: extra settle
         * delay, status clear, and a magic firmware mailbox write.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
             (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Propagate any link-state change to the net stack and log it. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}
2004
/* Context for the software fiber autonegotiation state machine,
 * advanced one tick at a time by tg3_fiber_aneg_smachine() (driven
 * from fiber_autoneg()).
 */
struct tg3_fiber_aneginfo {
        /* Current ANEG_STATE_* of the state machine. */
        int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        /* MR_* control/status bits: enable/restart requests on input,
         * completion and link-partner ability flags on output.
         */
        u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Tick counters: cur_time increments once per state-machine
         * call; link_time records the tick the current wait began.
         */
        unsigned long link_time, cur_time;

        /* Last received config word and how many consecutive times
         * the same word has been seen (ability-match detection).
         */
        u32 ability_match_cfg;
        int ability_match_count;

        /* Boolean match flags derived from the received config stream. */
        char ability_match, idle_match, ack_match;

        /* Raw config words: txconfig is written to MAC_TX_AUTO_NEG,
         * rxconfig is read from MAC_RX_AUTO_NEG (ANEG_CFG_* bits).
         */
        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
2062 #define ANEG_OK         0
2063 #define ANEG_DONE       1
2064 #define ANEG_TIMER_ENAB 2
2065 #define ANEG_FAILED     -1
2066
2067 #define ANEG_STATE_SETTLE_TIME  10000
2068
/* Advance the software fiber autonegotiation state machine by one
 * tick.  Samples the received config word from the MAC, updates the
 * match detectors, then runs one step of the state machine (which
 * appears to follow the IEEE 802.3 clause 37 arbitration flow --
 * NOTE(review): not verified against the spec here).
 *
 * @tp: device state (used for MAC register access)
 * @ap: autoneg context, initialized when state == ANEG_STATE_UNKNOWN
 *
 * Returns ANEG_OK (keep going), ANEG_TIMER_ENAB (keep going, timed
 * wait in progress), ANEG_DONE (finished) or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First call: reset all match/timing state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample the incoming config word and update the ability /
         * ack / idle match detectors.  "Ability match" requires the
         * same nonzero word to be seen on consecutive ticks.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                /* No config received: treat the line as idle. */
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch(ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                /* Entry point: either restart negotiation or report
                 * link OK immediately when autoneg is disabled.
                 */
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Start sending an all-zero config word. */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Hold in restart until the settle time elapses. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                } else {
                        ret = ANEG_TIMER_ENAB;
                }
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Advertise full duplex + symmetric pause. */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait for a stable nonzero config from the partner. */
                if (ap->ability_match != 0 && ap->rxconfig != 0) {
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                }
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Acknowledge the partner's config word. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* Accept only if the acked word matches what
                         * we matched on (ignoring the ACK bit itself);
                         * otherwise restart negotiation.
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                /* Reserved bits set in the received word: fail. */
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Decode the link partner's advertised abilities into
                 * MR_LP_ADV_* flags.
                 */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                /* Partner went back to zero config: renegotiate. */
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next-page exchange is unimplemented;
                                 * only proceed if neither side uses it.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words and wait for idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        };

        return ret;
}
2316
/* Run software 1000BASE-X autonegotiation by cranking the fiber aneg
 * state machine.  On return, *flags holds the MR_* result flags from
 * the negotiation.  Returns 1 if autoneg completed with a link and a
 * full-duplex-capable partner, 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Stop advertising anything while the MAC is reconfigured. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force GMII port mode, then enable transmission of config
	 * code words for the negotiation.
	 */
	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* Step the state machine for up to ~195ms (195000 x 1us). */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Negotiation finished (or timed out); stop sending configs. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*flags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
2360
/* Initialize the BCM8002 fiber PHY via its vendor-specific registers.
 * The sequence (PLL lock range, SW reset, POR pulse, channel register
 * selection) is opaque vendor magic; only run it on first init or when
 * PCS sync indicates a link that warrants a re-init.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete (~5ms busy-wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize (~150ms busy-wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2410
/* Drive fiber link setup on chips that have the hardware SG_DIG
 * autonegotiation engine.  mac_status is a MAC_STATUS snapshot taken
 * by the caller.  Returns 1 if the link came up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revs except 5704 A0/A1 need extra MAC_SERDES_CFG writes
	 * around SG_DIG_CTRL changes.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode.  If bit 31 is set (it is also set in the
		 * autoneg value 0x81388400 below), restore the default
		 * control value to take the engine out of autoneg.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)start autoneg: pulse bit 30 around the desired
		 * control value, then mark the PHY as just initted so
		 * the next poll gives negotiation time to finish.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Giver time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			/* Low two status bits signal completion/error;
			 * bit 1 is treated as "negotiation done" below.
			 */
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Negotiation done: derive flow control from the
			 * partner pause bits reported in SG_DIG_STATUS.
			 */
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Negotiation did not complete.  First time
			 * through just clear JUST_INITTED and retry on a
			 * later poll; otherwise turn the autoneg engine
			 * off and fall back to parallel detection.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
2535
/* Fiber link setup for chips without the SG_DIG hardware engine:
 * run the software autoneg state machine, fall back to parallel
 * detection, or force a 1000FD link.  Returns 1 if link is up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no link at all; drop the flow-control
	 * flag and bail out.
	 */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;
  
		if (fiber_autoneg(tp, &flags)) {
			/* Autoneg succeeded; map the partner's MR_*
			 * pause flags onto LPA_* flow-control bits.
			 */
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack SYNC/CFG change events until they stop
		 * re-asserting (bounded at 30 iterations) so stale
		 * events do not retrigger link processing.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel detection fallback: PCS sync with no config
		 * code words arriving means a non-negotiating partner.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2593
/* Top-level link setup for TBI/fiber (SERDES) devices.  Chooses
 * between hardware (SG_DIG) and software autoneg, programs the MAC
 * and LEDs for the result, and reports carrier transitions.
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current link parameters so we can tell at the end
	 * whether anything changed and a link report is warranted.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg chips with an established,
	 * healthy link (synced, signal present, no pending config or
	 * change events) skip renegotiation -- just ack the events.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear any stale link-change bit in the status block so the
	 * next interrupt reflects only new transitions.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack SYNC/CFG change events until they stop re-asserting
	 * (bounded at 100 iterations).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Briefly pulse SEND_CONFIGS to prod the peer. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links only ever run at 1000 Mb/s full duplex. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report carrier flips, or parameter changes on a stable link. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2707
/* Link setup for SERDES devices managed through an MII register
 * interface (the ASIC_REV_5714 special cases below).  Returns the
 * OR-accumulated error status of all tg3_readphy calls.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status events before probing the link. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR latches link-down; read it twice to get current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 the MAC's TX status supplies the link
		 * indication instead of the PHY's BMSR bit.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000BASE-X advertisement from the
		 * configured modes; (re)start autoneg only if it
		 * differs from what the PHY currently advertises or
		 * autoneg is not yet enabled.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			/* Autoneg just restarted; evaluate the link on
			 * a later poll.
			 */
			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: program duplex directly into BMCR. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Restart autoneg with nothing
				 * advertised to drop the link cleanly
				 * before forcing the new mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-sample link status (double read again). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex and flow control from the
			 * intersection of both ends' advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): this tests the *previous* active_duplex --
	 * tp->link_config.active_duplex is only updated from
	 * current_duplex further below -- so the MAC half-duplex bit
	 * can lag one link transition behind.  Confirm against later
	 * upstream tg3 before changing.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2874
/* Periodic parallel-detection handling for MII-managed serdes.
 *
 * With autoneg enabled but no link: probe the PHY's shadow and
 * expansion registers; signal-detect without incoming config code
 * words means a non-negotiating partner, so force 1000FD up.
 * Conversely, if a parallel-detected link starts receiving config
 * code words, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
		/* Give autoneg time to complete. */
		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Double read; presumably the first clears a
			 * latched value -- TODO confirm vs PHY datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2932
2933 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2934 {
2935         int err;
2936
2937         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2938                 err = tg3_setup_fiber_phy(tp, force_reset);
2939         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2940                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2941         } else {
2942                 err = tg3_setup_copper_phy(tp, force_reset);
2943         }
2944
2945         if (tp->link_config.active_speed == SPEED_1000 &&
2946             tp->link_config.active_duplex == DUPLEX_HALF)
2947                 tw32(MAC_TX_LENGTHS,
2948                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2949                       (6 << TX_LENGTHS_IPG_SHIFT) |
2950                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2951         else
2952                 tw32(MAC_TX_LENGTHS,
2953                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2954                       (6 << TX_LENGTHS_IPG_SHIFT) |
2955                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2956
2957         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2958                 if (netif_carrier_ok(tp->dev)) {
2959                         tw32(HOSTCC_STAT_COAL_TICKS,
2960                              tp->coal.stats_block_coalesce_usecs);
2961                 } else {
2962                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2963                 }
2964         }
2965
2966         return err;
2967 }
2968
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround or the indirect mailbox write
	 * method is already active, reaching this path is itself a
	 * driver bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Mark the pending recovery under tp->lock so the flag update
	 * is serialized with other tg3_flags writers.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
2989
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	/* Reap completed TX descriptors between the software consumer
	 * index and the consumer index reported in the status block.
	 */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completion slot with no skb means hardware and
		 * software disagree about the ring state -- recover.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor of the skb maps its linear header. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Subsequent descriptors map the paged fragments.
		 * Only the first slot of an skb carries the skb
		 * pointer, so a non-NULL skb here (or hitting hw_idx
		 * mid-skb) indicates a bogus completion.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Wake the queue if it was stopped and enough descriptors are
	 * free again; re-check under tx_lock to avoid racing the xmit
	 * path's queue-stop decision.
	 */
	if (unlikely(netif_queue_stopped(tp->dev))) {
		spin_lock(&tp->tx_lock);
		if (netif_queue_stopped(tp->dev) &&
		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		spin_unlock(&tp->tx_lock);
	}
}
3049
3050 /* Returns size of skb allocated or < 0 on error.
3051  *
3052  * We only need to fill in the address because the other members
3053  * of the RX descriptor are invariant, see tg3_init_rings.
3054  *
3055  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3056  * posting buffers we only dirty the first cache line of the RX
3057  * descriptor (containing the address).  Whereas for the RX status
3058  * buffers the cpu only reads the last cacheline of the RX descriptor
3059  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3060  */
3061 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3062                             int src_idx, u32 dest_idx_unmasked)
3063 {
3064         struct tg3_rx_buffer_desc *desc;
3065         struct ring_info *map, *src_map;
3066         struct sk_buff *skb;
3067         dma_addr_t mapping;
3068         int skb_size, dest_idx;
3069
3070         src_map = NULL;
3071         switch (opaque_key) {
3072         case RXD_OPAQUE_RING_STD:
3073                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3074                 desc = &tp->rx_std[dest_idx];
3075                 map = &tp->rx_std_buffers[dest_idx];
3076                 if (src_idx >= 0)
3077                         src_map = &tp->rx_std_buffers[src_idx];
3078                 skb_size = tp->rx_pkt_buf_sz;
3079                 break;
3080
3081         case RXD_OPAQUE_RING_JUMBO:
3082                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3083                 desc = &tp->rx_jumbo[dest_idx];
3084                 map = &tp->rx_jumbo_buffers[dest_idx];
3085                 if (src_idx >= 0)
3086                         src_map = &tp->rx_jumbo_buffers[src_idx];
3087                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3088                 break;
3089
3090         default:
3091                 return -EINVAL;
3092         };
3093
3094         /* Do not overwrite any of the map or rp information
3095          * until we are sure we can commit to a new buffer.
3096          *
3097          * Callers depend upon this behavior and assume that
3098          * we leave everything unchanged if we fail.
3099          */
3100         skb = dev_alloc_skb(skb_size);
3101         if (skb == NULL)
3102                 return -ENOMEM;
3103
3104         skb->dev = tp->dev;
3105         skb_reserve(skb, tp->rx_offset);
3106
3107         mapping = pci_map_single(tp->pdev, skb->data,
3108                                  skb_size - tp->rx_offset,
3109                                  PCI_DMA_FROMDEVICE);
3110
3111         map->skb = skb;
3112         pci_unmap_addr_set(map, mapping, mapping);
3113
3114         if (src_map != NULL)
3115                 src_map->skb = NULL;
3116
3117         desc->addr_hi = ((u64)mapping >> 32);
3118         desc->addr_lo = ((u64)mapping & 0xffffffff);
3119
3120         return skb_size;
3121 }
3122
3123 /* We only need to move over in the address because the other
3124  * members of the RX descriptor are invariant.  See notes above
3125  * tg3_alloc_rx_skb for full details.
3126  */
3127 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3128                            int src_idx, u32 dest_idx_unmasked)
3129 {
3130         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3131         struct ring_info *src_map, *dest_map;
3132         int dest_idx;
3133
3134         switch (opaque_key) {
3135         case RXD_OPAQUE_RING_STD:
3136                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3137                 dest_desc = &tp->rx_std[dest_idx];
3138                 dest_map = &tp->rx_std_buffers[dest_idx];
3139                 src_desc = &tp->rx_std[src_idx];
3140                 src_map = &tp->rx_std_buffers[src_idx];
3141                 break;
3142
3143         case RXD_OPAQUE_RING_JUMBO:
3144                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3145                 dest_desc = &tp->rx_jumbo[dest_idx];
3146                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3147                 src_desc = &tp->rx_jumbo[src_idx];
3148                 src_map = &tp->rx_jumbo_buffers[src_idx];
3149                 break;
3150
3151         default:
3152                 return;
3153         };
3154
3155         dest_map->skb = src_map->skb;
3156         pci_unmap_addr_set(dest_map, mapping,
3157                            pci_unmap_addr(src_map, mapping));
3158         dest_desc->addr_hi = src_desc->addr_hi;
3159         dest_desc->addr_lo = src_desc->addr_lo;
3160
3161         src_map->skb = NULL;
3162 }
3163
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged RX skb to the stack via the hw-accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3170
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which posting ring (and
		 * slot within it) the buffer originally came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unknown ring: skip without posting a buffer. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			/* Give the buffer back to the chip unchanged. */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD 
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: hand the mapped buffer up and
			 * replenish the ring slot with a new skb.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and let the
			 * chip reuse the original DMA buffer.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = tp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip saw a
		 * full 0xffff TCP/UDP pseudo checksum.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Post std-ring buffers in bounded batches so the chip
		 * never starves during a long poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the mailbox writes before any subsequent MMIO. */
	mmiowb();

	return received;
}
3351
/* NAPI poll callback: service link-change events, reap completed TX
 * descriptors, receive packets within the NAPI budget, then either
 * re-enable interrupts (done) or stay scheduled (more work).
 * Returns 0 when done, 1 when more work remains.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear LINK_CHG (keep UPDATED set) before
			 * servicing so a new event is not lost.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* TX error detected: bail out and let the reset task
		 * rebuild the hardware state.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			netif_rx_complete(netdev);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		/* Record the tag we have consumed up to; the rmb orders
		 * the tag read against the has-work check below.
		 */
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3413
/* Mark the IRQ handler as quiesced and wait for any in-flight handler
 * to finish.  The smp_mb() orders the irq_sync store before the
 * synchronize_irq() wait; handlers observe irq_sync via tg3_irq_sync().
 * Must not be called twice without an intervening re-enable (BUG_ON).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3423
/* Nonzero while the driver is quiescing IRQs; handlers check this to
 * avoid scheduling NAPI during shutdown/reset.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3428
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* Quiesce the IRQ handler before taking the lock so it cannot
	 * race with the critical section.
	 */
	if (irq_sync)
		tg3_irq_quiesce(tp);
	spin_lock_bh(&tp->lock);
}
3440
/* Release the lock taken by tg3_full_lock().  Note this does not
 * re-enable IRQ delivery; callers reset tp->irq_sync separately.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3445
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines tg3_poll() will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling if the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_HANDLED;
}
3462
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines tg3_poll() will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_RETVAL(1);
}
3487
/* Legacy INTx interrupt handler (non-tagged status).  Disambiguates
 * shared interrupts via the status block / PCI state register, masks
 * further irqs, and schedules NAPI when there is work.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/* Driver is quiescing; leave irqs masked and bail. */
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp))) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		} else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3530
/* INTx interrupt handler for chips using tagged status blocks.  A new
 * status_tag (different from last_tag) indicates fresh work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status_tag != tp->last_tag) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/* Driver is quiescing; leave irqs masked and bail. */
		if (tg3_irq_sync(tp))
			goto out;
		if (netif_rx_schedule_prep(dev)) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			/* Update last_tag to mark that this status has been
			 * seen. Because interrupt may be shared, we may be
			 * racing with tg3_poll(), so only update last_tag
			 * if tg3_poll() is not scheduled.
			 */
			tp->last_tag = sblk->status_tag;
			__netif_rx_schedule(dev);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3572
/* ISR for interrupt test: confirms delivery by checking the status
 * block / PCI state, ACKs the mailbox, and reports handled/not.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id,
		struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/* ACK and mask further irqs; the test only needs to see
		 * that the interrupt arrived.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3589
3590 static int tg3_init_hw(struct tg3 *, int);
3591 static int tg3_halt(struct tg3 *, int, int);
3592
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  On init failure the device is halted
 * and closed; the lock is dropped around dev_close() (which needs to
 * sleep) and re-taken before returning, preserving the caller's
 * locking contract.  Returns 0 on success or the tg3_init_hw() error.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() sleeps; must drop the lock first. */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		netif_poll_enable(tp->dev);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3614
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the INTx handler directly so netconsole etc.
 * can make progress with interrupts disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3623
/* Workqueue handler that performs a full chip reset after a TX timeout
 * or recovery request.  Stops the netif/NAPI machinery, halts and
 * re-initializes the hardware under the full lock, then restarts.
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	/* Device was closed while the work was queued: nothing to do. */
	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	/* Drop the lock to stop the netif path, then re-take it with
	 * irq_sync=1 so the IRQ handler is quiesced for the reset.
	 */
	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* TX hardware bug recovery: fall back to flushing mailbox
	 * writes before halting the chip.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3668
/* net_device watchdog callback: the TX queue stalled, so log it and
 * defer a full chip reset to process context via the reset task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	schedule_work(&tp->reset_task);
}
3678
3679 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3680 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3681 {
3682         u32 base = (u32) mapping & 0xffffffff;
3683
3684         return ((base > 0xffffdcc0) &&
3685                 (base + len + 8 < base));
3686 }
3687
/* Test for DMA addresses > 40-bit.  Only relevant on 64-bit highmem
 * configs for chips with the 40-bit DMA bug; otherwise always 0.
 */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3700
3701 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3702
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearizes the offending skb into a freshly mapped copy, re-posts it
 * as a single descriptor at *start, and unmaps/clears the original
 * fragment entries in [entry, last_plus_one).  The original skb is
 * always freed.  Returns 0 on success, -1 if the copy could not be
 * allocated or still crosses a 4G boundary (packet dropped).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Slot 0 held the linear head; the rest held frags. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First slot now owns the replacement skb. */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3760
3761 static void tg3_set_txd(struct tg3 *tp, int entry,
3762                         dma_addr_t mapping, int len, u32 flags,
3763                         u32 mss_and_is_end)
3764 {
3765         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3766         int is_end = (mss_and_is_end & 0x1);
3767         u32 mss = (mss_and_is_end >> 1);
3768         u32 vlan_tag = 0;
3769
3770         if (is_end)
3771                 flags |= TXD_FLAG_END;
3772         if (flags & TXD_FLAG_VLAN) {
3773                 vlan_tag = flags >> 16;
3774                 flags &= 0xffff;
3775         }
3776         vlan_tag |= (mss << TXD_MSS_SHIFT);
3777
3778         txd->addr_hi = ((u64) mapping >> 32);
3779         txd->addr_lo = ((u64) mapping & 0xffffffff);
3780         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3781         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3782 }
3783
3784 /* hard_start_xmit for devices that don't have any bugs and
3785  * support TG3_FLG2_HW_TSO_2 only.
3786  */
3787 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3788 {
3789         struct tg3 *tp = netdev_priv(dev);
3790         dma_addr_t mapping;
3791         u32 len, entry, base_flags, mss;
3792
3793         len = skb_headlen(skb);
3794
3795         /* We are running in BH disabled context with netif_tx_lock
3796          * and TX reclaim runs via tp->poll inside of a software
3797          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3798          * no IRQ context deadlocks to worry about either.  Rejoice!
3799          */
3800         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3801                 if (!netif_queue_stopped(dev)) {
3802                         netif_stop_queue(dev);
3803
3804                         /* This is a hard error, log it. */
3805                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3806                                "queue awake!\n", dev->name);
3807                 }
3808                 return NETDEV_TX_BUSY;
3809         }
3810
3811         entry = tp->tx_prod;
3812         base_flags = 0;
3813 #if TG3_TSO_SUPPORT != 0
3814         mss = 0;
3815         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3816             (mss = skb_shinfo(skb)->gso_size) != 0) {
3817                 int tcp_opt_len, ip_tcp_len;
3818
3819                 if (skb_header_cloned(skb) &&
3820                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3821                         dev_kfree_skb(skb);
3822                         goto out_unlock;
3823                 }
3824
3825                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3826                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3827                 else {
3828                         tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3829                         ip_tcp_len = (skb->nh.iph->ihl * 4) +
3830                                      sizeof(struct tcphdr);
3831
3832                         skb->nh.iph->check = 0;
3833                         skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3834                                                      tcp_opt_len);
3835                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3836                 }
3837
3838                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3839                                TXD_FLAG_CPU_POST_DMA);
3840
3841                 skb->h.th->check = 0;
3842
3843         }
3844         else if (skb->ip_summed == CHECKSUM_HW)
3845                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3846 #else
3847         mss = 0;
3848         if (skb->ip_summed == CHECKSUM_HW)
3849                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3850 #endif
3851 #if TG3_VLAN_TAG_USED
3852         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3853                 base_flags |= (TXD_FLAG_VLAN |
3854                                (vlan_tx_tag_get(skb) << 16));
3855 #endif
3856
3857         /* Queue skb data, a.k.a. the main skb fragment. */
3858         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3859
3860         tp->tx_buffers[entry].skb = skb;
3861         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3862
3863         tg3_set_txd(tp, entry, mapping, len, base_flags,
3864                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3865
3866         entry = NEXT_TX(entry);
3867
3868         /* Now loop through additional data fragments, and queue them. */
3869         if (skb_shinfo(skb)->nr_frags > 0) {
3870                 unsigned int i, last;
3871
3872                 last = skb_shinfo(skb)->nr_frags - 1;
3873                 for (i = 0; i <= last; i++) {
3874                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3875
3876                         len = frag->size;
3877                         mapping = pci_map_page(tp->pdev,
3878                                                frag->page,
3879                                                frag->page_offset,
3880                                                len, PCI_DMA_TODEVICE);
3881
3882                         tp->tx_buffers[entry].skb = NULL;
3883                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3884
3885                         tg3_set_txd(tp, entry, mapping, len,
3886                                     base_flags, (i == last) | (mss << 1));
3887
3888                         entry = NEXT_TX(entry);
3889                 }
3890         }
3891
3892         /* Packets are ready, update Tx producer idx local and on card. */
3893         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3894
3895         tp->tx_prod = entry;
3896         if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
3897                 spin_lock(&tp->tx_lock);
3898                 netif_stop_queue(dev);
3899                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3900                         netif_wake_queue(tp->dev);
3901                 spin_unlock(&tp->tx_lock);
3902         }
3903
3904 out_unlock:
3905         mmiowb();
3906
3907         dev->trans_start = jiffies;
3908
3909         return NETDEV_TX_OK;
3910 }
3911
3912 #if TG3_TSO_SUPPORT != 0
3913 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3914
3915 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3916  * TSO header is greater than 80 bytes.
3917  */
3918 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3919 {
3920         struct sk_buff *segs, *nskb;
3921
3922         /* Estimate the number of fragments in the worst case */
3923         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3924                 netif_stop_queue(tp->dev);
3925                 return NETDEV_TX_BUSY;
3926         }
3927
3928         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3929         if (unlikely(IS_ERR(segs)))
3930                 goto tg3_tso_bug_end;
3931
3932         do {
3933                 nskb = segs;
3934                 segs = segs->next;
3935                 nskb->next = NULL;
3936                 tg3_start_xmit_dma_bug(nskb, tp->dev);
3937         } while (segs);
3938
3939 tg3_tso_bug_end:
3940         dev_kfree_skb(skb);
3941
3942         return NETDEV_TX_OK;
3943 }
3944 #endif
3945
3946 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3947  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3948  */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;	/* set if any mapping trips a DMA erratum */

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	/* TSO path: non-zero gso_size and the frame is larger than one MTU. */
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* The headers are modified in place below, so we need a
		 * private copy if the header area is shared.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		/* Chips with the HW_TSO_1 bug mishandle TSO headers longer
		 * than 80 bytes; fall back to software GSO for those frames.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Pre-cook the IP header the way the chip expects: zero
		 * checksum, tot_len covering one segment plus headers.
		 */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* Hardware TSO computes the TCP checksum itself. */
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		}
		else {
			/* Firmware TSO wants the pseudo-header checksum
			 * seeded in the TCP header.
			 */
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0, IPPROTO_TCP, 0);
		}

		/* Encode IP/TCP option lengths for the chip.  The field
		 * position differs between chip families: mss bits 11+ vs.
		 * base_flags bits 12+.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
			}
		}
	}
#else
	mss = 0;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* Only the first descriptor of a packet records the skb pointer;
	 * reclaim uses it to know where a packet starts.
	 */
	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* Buffers that cross a 4GB DMA boundary trip a chip erratum. */
	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* Check both the 4GB-crossing and the 40-bit
			 * address errata for every fragment.
			 */
			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet so the
		 * workaround can re-queue it through bounce buffers.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Ring nearly full: stop the queue, then re-check under
		 * tx_lock in case reclaim freed space concurrently.
		 */
		spin_lock(&tp->tx_lock);
		netif_stop_queue(dev);
		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
		spin_unlock(&tp->tx_lock);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4128
4129 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4130                                int new_mtu)
4131 {
4132         dev->mtu = new_mtu;
4133
4134         if (new_mtu > ETH_DATA_LEN) {
4135                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4136                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4137                         ethtool_op_set_tso(dev, 0);
4138                 }
4139                 else
4140                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4141         } else {
4142                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4143                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4144                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4145         }
4146 }
4147
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Upper bound depends on chip capabilities (jumbo support). */
	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Device is up: quiesce traffic, halt the chip, record the new
	 * MTU, and bring the hardware back up with matching ring sizes.
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	/* If the restart failed, leave the interface stopped; the
	 * error is propagated to the caller.
	 */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4181
4182 /* Free up pending packets in all rx/tx rings.
4183  *
4184  * The chip has been shut down and the driver detached from
4185  * the networking, so no interrupts or new tx packets will
4186  * end up in the driver.  tp->{tx,}lock is not held and we are not
4187  * in an interrupt context and thus may sleep.
4188  */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Release every skb still posted in the standard rx ring. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Same for the jumbo rx ring (buffers have a larger fixed size). */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Tx ring: only the first descriptor of a packet holds the skb
	 * pointer; the next nr_frags slots are page fragments that must
	 * be unmapped individually before the skb itself is freed.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Head of the packet: linear data mapping. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* Fragment descriptors follow, wrapping around the ring. */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4253
4254 /* Initialize tx/rx rings for packet processing.
4255  *
4256  * The chip has been shut down and the driver detached from
4257  * the networking, so no interrupts or new tx packets will
4258  * end up in the driver.  tp->{tx,}lock are held and thus
4259  * we may not sleep.
4260  */
static void tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips have no jumbo ring; with a jumbo MTU they use
	 * larger buffers in the standard ring instead.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* Usable length excludes the rx_offset headroom and a
		 * 64-byte guard area at the end of the buffer.
		 */
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		/* opaque lets the completion path identify ring + index. */
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  A failed allocation
	 * just leaves the ring short; no error is propagated here.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
				     -1, i) < 0)
			break;
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0)
				break;
		}
	}
}
4323
4324 /*
4325  * Must not be invoked with interrupt sources disabled and
4326  * the hardware shutdown down.
4327  */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* The shadow-ring bookkeeping is one slab allocation; the jumbo
	 * and tx arrays point into it, so only the base is freed.
	 * kfree(NULL) is a no-op, so partial allocation is tolerated
	 * (this is also the err_out path of tg3_alloc_consistent()).
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4363
4364 /*
4365  * Must not be invoked with interrupt sources disabled and
4366  * the hardware shutdown down.  Can sleep.
4367  */
4368 static int tg3_alloc_consistent(struct tg3 *tp)
4369 {
4370         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4371                                       (TG3_RX_RING_SIZE +
4372                                        TG3_RX_JUMBO_RING_SIZE)) +
4373                                      (sizeof(struct tx_ring_info) *
4374                                       TG3_TX_RING_SIZE),
4375                                      GFP_KERNEL);
4376         if (!tp->rx_std_buffers)
4377                 return -ENOMEM;
4378
4379         memset(tp->rx_std_buffers, 0,
4380                (sizeof(struct ring_info) *
4381                 (TG3_RX_RING_SIZE +
4382                  TG3_RX_JUMBO_RING_SIZE)) +
4383                (sizeof(struct tx_ring_info) *
4384                 TG3_TX_RING_SIZE));
4385
4386         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4387         tp->tx_buffers = (struct tx_ring_info *)
4388                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4389
4390         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4391                                           &tp->rx_std_mapping);
4392         if (!tp->rx_std)
4393                 goto err_out;
4394
4395         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4396                                             &tp->rx_jumbo_mapping);
4397
4398         if (!tp->rx_jumbo)
4399                 goto err_out;
4400
4401         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4402                                           &tp->rx_rcb_mapping);
4403         if (!tp->rx_rcb)
4404                 goto err_out;
4405
4406         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4407                                            &tp->tx_desc_mapping);
4408         if (!tp->tx_ring)
4409                 goto err_out;
4410
4411         tp->hw_status = pci_alloc_consistent(tp->pdev,
4412                                              TG3_HW_STATUS_SIZE,
4413                                              &tp->status_mapping);
4414         if (!tp->hw_status)
4415                 goto err_out;
4416
4417         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4418                                             sizeof(struct tg3_hw_stats),
4419                                             &tp->stats_mapping);
4420         if (!tp->hw_stats)
4421                 goto err_out;
4422
4423         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4424         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4425
4426         return 0;
4427
4428 err_out:
4429         tg3_free_consistent(tp);
4430         return -ENOMEM;
4431 }
4432
4433 #define MAX_WAIT_CNT 1000
4434
4435 /* To stop a block, clear the enable bit and poll till it
4436  * clears.  tp->lock is held.
4437  */
4438 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4439 {
4440         unsigned int i;
4441         u32 val;
4442
4443         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4444                 switch (ofs) {
4445                 case RCVLSC_MODE:
4446                 case DMAC_MODE:
4447                 case MBFREE_MODE:
4448                 case BUFMGR_MODE:
4449                 case MEMARB_MODE:
4450                         /* We can't enable/disable these bits of the
4451                          * 5705/5750, just say success.
4452                          */
4453                         return 0;
4454
4455                 default:
4456                         break;
4457                 };
4458         }
4459
4460         val = tr32(ofs);
4461         val &= ~enable_bit;
4462         tw32_f(ofs, val);
4463
4464         for (i = 0; i < MAX_WAIT_CNT; i++) {
4465                 udelay(100);
4466                 val = tr32(ofs);
4467                 if ((val & enable_bit) == 0)
4468                         break;
4469         }
4470
4471         if (i == MAX_WAIT_CNT && !silent) {
4472                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4473                        "ofs=%lx enable_bit=%x\n",
4474                        ofs, enable_bit);
4475                 return -ENODEV;
4476         }
4477
4478         return 0;
4479 }
4480
4481 /* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new rx frames before tearing anything down. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Shut down the receive-side blocks.  Errors are OR-ed together
	 * so a single failure anywhere yields a non-zero return.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Shut down the transmit-side blocks and the DMA engines. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* The MAC tx engine is polled by hand rather than via
	 * tg3_stop_block() (different timeout/reporting behavior).
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing, write DMA and buffer manager front-end. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset all the FTQs. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the status and statistics blocks so stale data is not
	 * interpreted after the next bring-up.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4543
4544 /* tp->lock is held. */
4545 static int tg3_nvram_lock(struct tg3 *tp)
4546 {
4547         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4548                 int i;
4549
4550                 if (tp->nvram_lock_cnt == 0) {
4551                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4552                         for (i = 0; i < 8000; i++) {
4553                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4554                                         break;
4555                                 udelay(20);
4556                         }
4557                         if (i == 8000) {
4558                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4559                                 return -ENODEV;
4560                         }
4561                 }
4562                 tp->nvram_lock_cnt++;
4563         }
4564         return 0;
4565 }
4566
4567 /* tp->lock is held. */
4568 static void tg3_nvram_unlock(struct tg3 *tp)
4569 {
4570         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4571                 if (tp->nvram_lock_cnt > 0)
4572                         tp->nvram_lock_cnt--;
4573                 if (tp->nvram_lock_cnt == 0)
4574                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4575         }
4576 }
4577
4578 /* tp->lock is held. */
4579 static void tg3_enable_nvram_access(struct tg3 *tp)
4580 {
4581         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4582             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4583                 u32 nvaccess = tr32(NVRAM_ACCESS);
4584
4585                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4586         }
4587 }
4588
4589 /* tp->lock is held. */
4590 static void tg3_disable_nvram_access(struct tg3 *tp)
4591 {
4592         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4593             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4594                 u32 nvaccess = tr32(NVRAM_ACCESS);
4595
4596                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4597         }
4598 }
4599
4600 /* tp->lock is held. */
4601 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4602 {
4603         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4604                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4605
4606         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4607                 switch (kind) {
4608                 case RESET_KIND_INIT:
4609                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4610                                       DRV_STATE_START);
4611                         break;
4612
4613                 case RESET_KIND_SHUTDOWN:
4614                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4615                                       DRV_STATE_UNLOAD);
4616                         break;
4617
4618                 case RESET_KIND_SUSPEND:
4619                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4620                                       DRV_STATE_SUSPEND);
4621                         break;
4622
4623                 default:
4624                         break;
4625                 };
4626         }
4627 }
4628
4629 /* tp->lock is held. */
4630 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4631 {
4632         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4633                 switch (kind) {
4634                 case RESET_KIND_INIT:
4635                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4636                                       DRV_STATE_START_DONE);
4637                         break;
4638
4639                 case RESET_KIND_SHUTDOWN:
4640                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4641                                       DRV_STATE_UNLOAD_DONE);
4642                         break;
4643
4644                 default:
4645                         break;
4646                 };
4647         }
4648 }
4649
4650 /* tp->lock is held. */
4651 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4652 {
4653         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4654                 switch (kind) {
4655                 case RESET_KIND_INIT:
4656                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4657                                       DRV_STATE_START);
4658                         break;
4659
4660                 case RESET_KIND_SHUTDOWN:
4661                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4662                                       DRV_STATE_UNLOAD);
4663                         break;
4664
4665                 case RESET_KIND_SUSPEND:
4666                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4667                                       DRV_STATE_SUSPEND);
4668                         break;
4669
4670                 default:
4671                         break;
4672                 };
4673         }
4674 }
4675
4676 static void tg3_stop_fw(struct tg3 *);
4677
4678 /* tp->lock is held. */
4679 static int tg3_chip_reset(struct tg3 *tp)
4680 {
4681         u32 val;
4682         void (*write_op)(struct tg3 *, u32, u32);
4683         int i;
4684
4685         tg3_nvram_lock(tp);
4686
4687         /* No matching tg3_nvram_unlock() after this because
4688          * chip reset below will undo the nvram lock.
4689          */
4690         tp->nvram_lock_cnt = 0;
4691
4692         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4693             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4694             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4695                 tw32(GRC_FASTBOOT_PC, 0);
4696
4697         /*
4698          * We must avoid the readl() that normally takes place.
4699          * It locks machines, causes machine checks, and other
4700          * fun things.  So, temporarily disable the 5701
4701          * hardware workaround, while we do the reset.
4702          */
4703         write_op = tp->write32;
4704         if (write_op == tg3_write_flush_reg32)
4705                 tp->write32 = tg3_write32;
4706
4707         /* do the reset */
4708         val = GRC_MISC_CFG_CORECLK_RESET;
4709
4710         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4711                 if (tr32(0x7e2c) == 0x60) {
4712                         tw32(0x7e2c, 0x20);
4713                 }
4714                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4715                         tw32(GRC_MISC_CFG, (1 << 29));
4716                         val |= (1 << 29);
4717                 }
4718         }
4719
4720         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4721                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4722         tw32(GRC_MISC_CFG, val);
4723
4724         /* restore 5701 hardware bug workaround write method */
4725         tp->write32 = write_op;
4726
4727         /* Unfortunately, we have to delay before the PCI read back.
4728          * Some 575X chips even will not respond to a PCI cfg access
4729          * when the reset command is given to the chip.
4730          *
4731          * How do these hardware designers expect things to work
4732          * properly if the PCI write is posted for a long period
4733          * of time?  It is always necessary to have some method by
4734          * which a register read back can occur to push the write
4735          * out which does the reset.
4736          *
4737          * For most tg3 variants the trick below was working.
4738          * Ho hum...
4739          */
4740         udelay(120);
4741
4742         /* Flush PCI posted writes.  The normal MMIO registers
4743          * are inaccessible at this time so this is the only
4744          * way to make this reliably (actually, this is no longer
4745          * the case, see above).  I tried to use indirect
4746          * register read/write but this upset some 5701 variants.
4747          */
4748         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4749
4750         udelay(120);
4751
4752         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4753                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4754                         int i;
4755                         u32 cfg_val;
4756
4757                         /* Wait for link training to complete.  */
4758                         for (i = 0; i < 5000; i++)
4759                                 udelay(100);
4760
4761                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4762                         pci_write_config_dword(tp->pdev, 0xc4,
4763                                                cfg_val | (1 << 15));
4764                 }
4765                 /* Set PCIE max payload size and clear error status.  */
4766                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4767         }
4768
4769         /* Re-enable indirect register accesses. */
4770         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4771                                tp->misc_host_ctrl);
4772
4773         /* Set MAX PCI retry to zero. */
4774         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4775         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4776             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4777                 val |= PCISTATE_RETRY_SAME_DMA;
4778         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4779
4780         pci_restore_state(tp->pdev);
4781
4782         /* Make sure PCI-X relaxed ordering bit is clear. */
4783         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4784         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4785         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4786
4787         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4788                 u32 val;
4789
4790                 /* Chip reset on 5780 will reset MSI enable bit,
4791                  * so need to restore it.
4792                  */
4793                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4794                         u16 ctrl;
4795
4796                         pci_read_config_word(tp->pdev,
4797                                              tp->msi_cap + PCI_MSI_FLAGS,
4798                                              &ctrl);
4799                         pci_write_config_word(tp->pdev,
4800                                               tp->msi_cap + PCI_MSI_FLAGS,
4801                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4802                         val = tr32(MSGINT_MODE);
4803                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4804                 }
4805
4806                 val = tr32(MEMARB_MODE);
4807                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4808
4809         } else
4810                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4811
4812         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4813                 tg3_stop_fw(tp);
4814                 tw32(0x5000, 0x400);
4815         }
4816
4817         tw32(GRC_MODE, tp->grc_mode);
4818
4819         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4820                 u32 val = tr32(0xc4);
4821
4822                 tw32(0xc4, val | (1 << 15));
4823         }
4824
4825         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4826             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4827                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4828                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4829                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4830                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4831         }
4832
4833         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4834                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4835                 tw32_f(MAC_MODE, tp->mac_mode);
4836         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4837                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4838                 tw32_f(MAC_MODE, tp->mac_mode);
4839         } else
4840                 tw32_f(MAC_MODE, 0);
4841         udelay(40);
4842
4843         /* Wait for firmware initialization to complete. */
4844         for (i = 0; i < 100000; i++) {
4845                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4846                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4847                         break;
4848                 udelay(10);
4849         }
4850
4851         /* Chip might not be fitted with firmare.  Some Sun onboard
4852          * parts are configured like that.  So don't signal the timeout
4853          * of the above loop as an error, but do report the lack of
4854          * running firmware once.
4855          */
4856         if (i >= 100000 &&
4857             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4858                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4859
4860                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4861                        tp->dev->name);
4862         }
4863
4864         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4865             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4866                 u32 val = tr32(0x7c00);
4867
4868                 tw32(0x7c00, val | (1 << 25));
4869         }
4870
4871         /* Reprobe ASF enable state.  */
4872         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4873         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4874         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4875         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4876                 u32 nic_cfg;
4877
4878                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4879                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4880                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4881                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4882                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4883                 }
4884         }
4885
4886         return 0;
4887 }
4888
4889 /* tp->lock is held. */
4890 static void tg3_stop_fw(struct tg3 *tp)
4891 {
4892         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4893                 u32 val;
4894                 int i;
4895
4896                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4897                 val = tr32(GRC_RX_CPU_EVENT);
4898                 val |= (1 << 14);
4899                 tw32(GRC_RX_CPU_EVENT, val);
4900
4901                 /* Wait for RX cpu to ACK the event.  */
4902                 for (i = 0; i < 100; i++) {
4903                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4904                                 break;
4905                         udelay(1);
4906                 }
4907         }
4908 }
4909
/* tp->lock is held.
 *
 * Fully halt the chip: quiesce the ASF firmware, announce the reset
 * kind, abort all hardware activity, reset the core, and then post the
 * appropriate completion signatures (both legacy and new-handshake).
 *
 * Returns the error code from tg3_chip_reset(), 0 on success.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
4930
/* Layout of the built-in 5701 A0 workaround firmware image.  The
 * section base addresses are link-time addresses in the on-chip cpu's
 * address space (the loader masks them with 0xffff to place each
 * section into cpu scratch memory — see tg3_load_firmware_cpu()).
 */
#define TG3_FW_RELEASE_MAJOR    0x0
#define TG3_FW_RELASE_MINOR     0x0     /* sic: historical misspelling of RELEASE, kept as-is */
#define TG3_FW_RELEASE_FIX      0x0
#define TG3_FW_START_ADDR       0x08000000
#define TG3_FW_TEXT_ADDR        0x08000000
#define TG3_FW_TEXT_LEN         0x9c0
#define TG3_FW_RODATA_ADDR      0x080009c0
#define TG3_FW_RODATA_LEN       0x60
#define TG3_FW_DATA_ADDR        0x08000a40
#define TG3_FW_DATA_LEN         0x20
#define TG3_FW_SBSS_ADDR        0x08000a60
#define TG3_FW_SBSS_LEN         0xc
#define TG3_FW_BSS_ADDR         0x08000a70
#define TG3_FW_BSS_LEN          0x10
4945
/* .text section of the 5701 A0 workaround firmware: opaque instruction
 * words for the NIC's on-chip cpus (see the copyright notice at the top
 * of this file).  Downloaded into RX/TX cpu scratch memory by
 * tg3_load_5701_a0_firmware_fix().  Do not edit by hand.
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5039
/* .rodata section of the 5701 A0 workaround firmware.  Opaque data words
 * referenced by tg3FwText (the bytes spell short ASCII tags such as
 * "5701" and "fatalErr").  Do not edit by hand.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5047
/* The .data section is all zeros; rather than storing it, the loader is
 * handed a NULL data pointer, which zero-fills the section.
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5054
/* On-chip scratch memory windows (base register offset and byte size)
 * into which downloadable RX/TX cpu firmware images are written.
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
5059
5060 /* tp->lock is held. */
5061 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5062 {
5063         int i;
5064
5065         BUG_ON(offset == TX_CPU_BASE &&
5066             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5067
5068         if (offset == RX_CPU_BASE) {
5069                 for (i = 0; i < 10000; i++) {
5070                         tw32(offset + CPU_STATE, 0xffffffff);
5071                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5072                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5073                                 break;
5074                 }
5075
5076                 tw32(offset + CPU_STATE, 0xffffffff);
5077                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5078                 udelay(10);
5079         } else {
5080                 for (i = 0; i < 10000; i++) {
5081                         tw32(offset + CPU_STATE, 0xffffffff);
5082                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5083                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5084                                 break;
5085                 }
5086         }
5087
5088         if (i >= 10000) {
5089                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5090                        "and %s CPU\n",
5091                        tp->dev->name,
5092                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5093                 return -ENODEV;
5094         }
5095
5096         /* Clear firmware's nvram arbitration. */
5097         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5098                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5099         return 0;
5100 }
5101
/* Describes one downloadable firmware image as three sections (text,
 * rodata, data).  Each section has its link-time base address, its
 * length in bytes, and a pointer to its words; a NULL data pointer
 * means the section is all zeros and should be zero-filled by the
 * loader (see tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* .text length in bytes */
	u32 *text_data;			/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* .data length in bytes */
	u32 *data_data;			/* .data words, or NULL for zeros */
};
5113
5114 /* tp->lock is held. */
5115 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5116                                  int cpu_scratch_size, struct fw_info *info)
5117 {
5118         int err, lock_err, i;
5119         void (*write_op)(struct tg3 *, u32, u32);
5120
5121         if (cpu_base == TX_CPU_BASE &&
5122             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5123                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5124                        "TX cpu firmware on %s which is 5705.\n",
5125                        tp->dev->name);
5126                 return -EINVAL;
5127         }
5128
5129         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5130                 write_op = tg3_write_mem;
5131         else
5132                 write_op = tg3_write_indirect_reg32;
5133
5134         /* It is possible that bootcode is still loading at this point.
5135          * Get the nvram lock first before halting the cpu.
5136          */
5137         lock_err = tg3_nvram_lock(tp);
5138         err = tg3_halt_cpu(tp, cpu_base);
5139         if (!lock_err)
5140                 tg3_nvram_unlock(tp);
5141         if (err)
5142                 goto out;
5143
5144         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5145                 write_op(tp, cpu_scratch_base + i, 0);
5146         tw32(cpu_base + CPU_STATE, 0xffffffff);
5147         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5148         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5149                 write_op(tp, (cpu_scratch_base +
5150                               (info->text_base & 0xffff) +
5151                               (i * sizeof(u32))),
5152                          (info->text_data ?
5153                           info->text_data[i] : 0));
5154         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5155                 write_op(tp, (cpu_scratch_base +
5156                               (info->rodata_base & 0xffff) +
5157                               (i * sizeof(u32))),
5158                          (info->rodata_data ?
5159                           info->rodata_data[i] : 0));
5160         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5161                 write_op(tp, (cpu_scratch_base +
5162                               (info->data_base & 0xffff) +
5163                               (i * sizeof(u32))),
5164                          (info->data_data ?
5165                           info->data_data[i] : 0));
5166
5167         err = 0;
5168
5169 out:
5170         return err;
5171 }
5172
5173 /* tp->lock is held. */
5174 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5175 {
5176         struct fw_info info;
5177         int err, i;
5178
5179         info.text_base = TG3_FW_TEXT_ADDR;
5180         info.text_len = TG3_FW_TEXT_LEN;
5181         info.text_data = &tg3FwText[0];
5182         info.rodata_base = TG3_FW_RODATA_ADDR;
5183         info.rodata_len = TG3_FW_RODATA_LEN;
5184         info.rodata_data = &tg3FwRodata[0];
5185         info.data_base = TG3_FW_DATA_ADDR;
5186         info.data_len = TG3_FW_DATA_LEN;
5187         info.data_data = NULL;
5188
5189         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5190                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5191                                     &info);
5192         if (err)
5193                 return err;
5194
5195         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5196                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5197                                     &info);
5198         if (err)
5199                 return err;
5200
5201         /* Now startup only the RX cpu. */
5202         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5203         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5204
5205         for (i = 0; i < 5; i++) {
5206                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5207                         break;
5208                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5209                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5210                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5211                 udelay(1000);
5212         }
5213         if (i >= 5) {
5214                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5215                        "to set RX CPU PC, is %08x should be %08x\n",
5216                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5217                        TG3_FW_TEXT_ADDR);
5218                 return -ENODEV;
5219         }
5220         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5221         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5222
5223         return 0;
5224 }
5225
5226 #if TG3_TSO_SUPPORT != 0
5227
5228 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5229 #define TG3_TSO_FW_RELASE_MINOR         0x6
5230 #define TG3_TSO_FW_RELEASE_FIX          0x0
5231 #define TG3_TSO_FW_START_ADDR           0x08000000
5232 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5233 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5234 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5235 #define TG3_TSO_FW_RODATA_LEN           0x60
5236 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5237 #define TG3_TSO_FW_DATA_LEN             0x30
5238 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5239 #define TG3_TSO_FW_SBSS_LEN             0x2c
5240 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5241 #define TG3_TSO_FW_BSS_LEN              0x894
5242
5243 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5244         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5245         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5246         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5247         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5248         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5249         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5250         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5251         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5252         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5253         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5254         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5255         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5256         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5257         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5258         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5259         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5260         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5261         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5262         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5263         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5264         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5265         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5266         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5267         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5268         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5269         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5270         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5271         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5272         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5273         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5274         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5275         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5276         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5277         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5278         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5279         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5280         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5281         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5282         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5283         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5284         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5285         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5286         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5287         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5288         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5289         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5290         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5291         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5292         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5293         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5294         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5295         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5296         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5297         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5298         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5299         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5300         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5301         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5302         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5303         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5304         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5305         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5306         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5307         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5308         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5309         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5310         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5311         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5312         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5313         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5314         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5315         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5316         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5317         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5318         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5319         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5320         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5321         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5322         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5323         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5324         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5325         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5326         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5327         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5328         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5329         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5330         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5331         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5332         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5333         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5334         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5335         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5336         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5337         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5338         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5339         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5340         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5341         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5342         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5343         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5344         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5345         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5346         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5347         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5348         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5349         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5350         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5351         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5352         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5353         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5354         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5355         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5356         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5357         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5358         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5359         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5360         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5361         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5362         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5363         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5364         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5365         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5366         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5367         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5368         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5369         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5370         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5371         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5372         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5373         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5374         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5375         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5376         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5377         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5378         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5379         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5380         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5381         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5382         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5383         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5384         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5385         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5386         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5387         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5388         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5389         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5390         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5391         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5392         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5393         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5394         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5395         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5396         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5397         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5398         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5399         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5400         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5401         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5402         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5403         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5404         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5405         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5406         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5407         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5408         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5409         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5410         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5411         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5412         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5413         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5414         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5415         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5416         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5417         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5418         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5419         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5420         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5421         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5422         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5423         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5424         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5425         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5426         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5427         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5428         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5429         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5430         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5431         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5432         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5433         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5434         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5435         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5436         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5437         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5438         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5439         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5440         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5441         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5442         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5443         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5444         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5445         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5446         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5447         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5448         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5449         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5450         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5451         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5452         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5453         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5454         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5455         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5456         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5457         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5458         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5459         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5460         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5461         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5462         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5463         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5464         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5465         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5466         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5467         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5468         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5469         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5470         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5471         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5472         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5473         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5474         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5475         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5476         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5477         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5478         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5479         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5480         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5481         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5482         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5483         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5484         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5485         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5486         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5487         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5488         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5489         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5490         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5491         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5492         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5493         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5494         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5495         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5496         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5497         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5498         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5499         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5500         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5501         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5502         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5503         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5504         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5505         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5506         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5507         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5508         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5509         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5510         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5511         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5512         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5513         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5514         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5515         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5516         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5517         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5518         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5519         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5520         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5521         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5522         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5523         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5524         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5525         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5526         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5527         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5528 };
5529
/*
 * Read-only data segment of the TSO firmware image, loaded at
 * TG3_TSO_FW_RODATA_ADDR.  The words are opaque firmware data; many of
 * them decode as big-endian ASCII tags (e.g. 0x4d61696e = "Main",
 * 0x43707542 = "CpuB", 0x53774576 0x656e7430 = "SwEvent0",
 * 0x66617461 0x6c457272 = "fatalErr") — presumably debug/event strings
 * used by the on-chip CPU.  Do not edit by hand.
 */
5530 static u32 tg3TsoFwRodata[] = {
5531         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5532         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5533         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5534         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5535         0x00000000,
5536 };
5537
/*
 * Initialized data segment of the TSO firmware image, loaded at
 * TG3_TSO_FW_DATA_ADDR.  Contains what decodes as the big-endian ASCII
 * version tag "stkoffld_v1.6.0" (0x73746b6f 0x66666c64 0x5f76312e
 * 0x362e3000) followed by zero padding.  Do not edit by hand.
 */
5538 static u32 tg3TsoFwData[] = {
5539         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5540         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5541         0x00000000,
5542 };
5543
5544 /* 5705 needs a special version of the TSO firmware.  */
5545 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
/*
 * NOTE(review): "RELASE" below is a misspelling of "RELEASE".  It is kept
 * as-is because the macro name is presumably referenced elsewhere in this
 * file; renaming it here alone would break those uses — confirm before
 * fixing tree-wide.
 */
5546 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5547 #define TG3_TSO5_FW_RELEASE_FIX         0x0
/*
 * Memory layout of the 5705 TSO firmware image in device memory.
 * Text starts at 0x00010000 and each subsequent segment follows the
 * previous one: rodata at text+TEXT_LEN (0x10000 + 0xe90 = 0x10e90),
 * then data, sbss and bss.  The small gaps (e.g. rodata end 0x10ee0 vs
 * DATA_ADDR 0x10f00) are presumably alignment padding — these values
 * must match the linked firmware blob, so change them only together
 * with the tg3Tso5Fw* arrays.
 */
5548 #define TG3_TSO5_FW_START_ADDR          0x00010000
5549 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5550 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5551 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5552 #define TG3_TSO5_FW_RODATA_LEN          0x50
5553 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5554 #define TG3_TSO5_FW_DATA_LEN            0x20
5555 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5556 #define TG3_TSO5_FW_SBSS_LEN            0x28
5557 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5558 #define TG3_TSO5_FW_BSS_LEN             0x88
5559
5560 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5561         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5562         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5563         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5564         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5565         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5566         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5567         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5568         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5569         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5570         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5571         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5572         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5573         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5574         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5575         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5576         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5577         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5578         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5579         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5580         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5581         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5582         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5583         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5584         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5585         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5586         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5587         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5588         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5589         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5590         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5591         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5592         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5593         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5594         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5595         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5596         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5597         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5598         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5599         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5600         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5601         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5602         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5603         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5604         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5605         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5606         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5607         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5608         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5609         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5610         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5611         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5612         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5613         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5614         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5615         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5616         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5617         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5618         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5619         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5620         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5621         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5622         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5623         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5624         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5625         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5626         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5627         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5628         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5629         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5630         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5631         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5632         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5633         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5634         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5635         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5636         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5637         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5638         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5639         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5640         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5641         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5642         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5643         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5644         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5645         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5646         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5647         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5648         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5649         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5650         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5651         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5652         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5653         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5654         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5655         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5656         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5657         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5658         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5659         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5660         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5661         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5662         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5663         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5664         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5665         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5666         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5667         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5668         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5669         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5670         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5671         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5672         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5673         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5674         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5675         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5676         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5677         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5678         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5679         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5680         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5681         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5682         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5683         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5684         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5685         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5686         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5687         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5688         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5689         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5690         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5691         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5692         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5693         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5694         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5695         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5696         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5697         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5698         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5699         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5700         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5701         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5702         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5703         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5704         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5705         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5706         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5707         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5708         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5709         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5710         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5711         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5712         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5713         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5714         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5715         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5716         0x00000000, 0x00000000, 0x00000000,
5717 };
5718
/* Read-only data segment of the 5705 TSO firmware image.  The words are
 * opaque firmware contents and must not be edited; the recognizable ASCII
 * runs ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr") are strings
 * embedded by the firmware build.  Loaded into NIC SRAM at
 * TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
5725
/* Initialized data segment of the 5705 TSO firmware image.  Contains the
 * firmware's version string "stkoffld_v1.2.0" (visible in the ASCII of the
 * words below); contents must match the firmware build and must not be
 * edited.  Loaded into NIC SRAM at TG3_TSO5_FW_DATA_ADDR by
 * tg3_load_tso_firmware().
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
5730
/* Load the TSO firmware into the appropriate on-chip CPU and start it.
 * tp->lock is held.
 *
 * Returns 0 on success (or immediately when the chip does TSO in
 * hardware and needs no firmware), a negative errno from
 * tg3_load_firmware_cpu(), or -ENODEV if the CPU's program counter
 * cannot be set to the firmware entry point.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        struct fw_info info;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;

        /* Chips with hardware TSO need no firmware download at all. */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                return 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                /* 5705 variant: firmware runs on the RX CPU and is
                 * staged in part of the MBUF pool SRAM; the scratch
                 * size is the total footprint of all firmware sections.
                 */
                info.text_base = TG3_TSO5_FW_TEXT_ADDR;
                info.text_len = TG3_TSO5_FW_TEXT_LEN;
                info.text_data = &tg3Tso5FwText[0];
                info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
                info.rodata_data = &tg3Tso5FwRodata[0];
                info.data_base = TG3_TSO5_FW_DATA_ADDR;
                info.data_len = TG3_TSO5_FW_DATA_LEN;
                info.data_data = &tg3Tso5FwData[0];
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
                cpu_scratch_size = (info.text_len +
                                    info.rodata_len +
                                    info.data_len +
                                    TG3_TSO5_FW_SBSS_LEN +
                                    TG3_TSO5_FW_BSS_LEN);
        } else {
                /* All other TSO-capable chips: firmware runs on the TX
                 * CPU with its dedicated scratch area.
                 */
                info.text_base = TG3_TSO_FW_TEXT_ADDR;
                info.text_len = TG3_TSO_FW_TEXT_LEN;
                info.text_data = &tg3TsoFwText[0];
                info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO_FW_RODATA_LEN;
                info.rodata_data = &tg3TsoFwRodata[0];
                info.data_base = TG3_TSO_FW_DATA_ADDR;
                info.data_len = TG3_TSO_FW_DATA_LEN;
                info.data_data = &tg3TsoFwData[0];
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    &info);
        if (err)
                return err;

        /* Now startup the cpu. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC,    info.text_base);

        /* Verify the PC stuck; retry up to 5 times, halting the CPU
         * and rewriting state/PC before each re-check.
         */
        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == info.text_base)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC,    info.text_base);
                udelay(1000);
        }
        if (i >= 5) {
                printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
                       "to set CPU PC, is %08x should be %08x\n",
                       tp->dev->name, tr32(cpu_base + CPU_PC),
                       info.text_base);
                return -ENODEV;
        }
        /* Release the CPU from halt: clearing CPU_MODE starts execution
         * at the programmed PC.
         */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
        return 0;
}
5802
5803 #endif /* TG3_TSO_SUPPORT != 0 */
5804
5805 /* tp->lock is held. */
5806 static void __tg3_set_mac_addr(struct tg3 *tp)
5807 {
5808         u32 addr_high, addr_low;
5809         int i;
5810
5811         addr_high = ((tp->dev->dev_addr[0] << 8) |
5812                      tp->dev->dev_addr[1]);
5813         addr_low = ((tp->dev->dev_addr[2] << 24) |
5814                     (tp->dev->dev_addr[3] << 16) |
5815                     (tp->dev->dev_addr[4] <<  8) |
5816                     (tp->dev->dev_addr[5] <<  0));
5817         for (i = 0; i < 4; i++) {
5818                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5819                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5820         }
5821
5822         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5823             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5824                 for (i = 0; i < 12; i++) {
5825                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5826                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5827                 }
5828         }
5829
5830         addr_high = (tp->dev->dev_addr[0] +
5831                      tp->dev->dev_addr[1] +
5832                      tp->dev->dev_addr[2] +
5833                      tp->dev->dev_addr[3] +
5834                      tp->dev->dev_addr[4] +
5835                      tp->dev->dev_addr[5]) &
5836                 TX_BACKOFF_SEED_MASK;
5837         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5838 }
5839
/* net_device set_mac_address handler: validate and install a new MAC
 * address.  @p is a struct sockaddr carrying the address in sa_data.
 * Returns 0 on success, -EINVAL for an invalid ethernet address, or the
 * error from tg3_restart_hw() on the ASF path.
 */
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
        struct tg3 *tp = netdev_priv(dev);
        struct sockaddr *addr = p;
        int err = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        /* If the device is down, the new address will be programmed
         * into the hardware on the next bring-up.
         */
        if (!netif_running(dev))
                return 0;

        if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                /* Reset chip so that ASF can re-init any MAC addresses it
                 * needs.
                 */
                tg3_netif_stop(tp);
                tg3_full_lock(tp, 1);

                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 0);
                /* Only restart the interface if the hardware came back;
                 * on failure it stays stopped and err is returned.
                 */
                if (!err)
                        tg3_netif_start(tp);
                tg3_full_unlock(tp);
        } else {
                /* No ASF: just reprogram the MAC registers under the lock. */
                spin_lock_bh(&tp->lock);
                __tg3_set_mac_addr(tp);
                spin_unlock_bh(&tp->lock);
        }

        return err;
}
5874
5875 /* tp->lock is held. */
5876 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5877                            dma_addr_t mapping, u32 maxlen_flags,
5878                            u32 nic_addr)
5879 {
5880         tg3_write_mem(tp,
5881                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5882                       ((u64) mapping >> 32));
5883         tg3_write_mem(tp,
5884                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5885                       ((u64) mapping & 0xffffffff));
5886         tg3_write_mem(tp,
5887                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5888                        maxlen_flags);
5889
5890         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5891                 tg3_write_mem(tp,
5892                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5893                               nic_addr);
5894 }
5895
5896 static void __tg3_set_rx_mode(struct net_device *);
5897 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5898 {
5899         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5900         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5901         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5902         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5903         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5904                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5905                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5906         }
5907         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5908         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5909         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5910                 u32 val = ec->stats_block_coalesce_usecs;
5911
5912                 if (!netif_carrier_ok(tp->dev))
5913                         val = 0;
5914
5915                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5916         }
5917 }
5918
5919 /* tp->lock is held. */
5920 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5921 {
5922         u32 val, rdmac_mode;
5923         int i, err, limit;
5924
5925         tg3_disable_ints(tp);
5926
5927         tg3_stop_fw(tp);
5928
5929         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5930
5931         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5932                 tg3_abort_hw(tp, 1);
5933         }
5934
5935         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5936                 tg3_phy_reset(tp);
5937
5938         err = tg3_chip_reset(tp);
5939         if (err)
5940                 return err;
5941
5942         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5943
5944         /* This works around an issue with Athlon chipsets on
5945          * B3 tigon3 silicon.  This bit has no effect on any
5946          * other revision.  But do not set this on PCI Express
5947          * chips.
5948          */
5949         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5950                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5951         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5952
5953         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5954             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5955                 val = tr32(TG3PCI_PCISTATE);
5956                 val |= PCISTATE_RETRY_SAME_DMA;
5957                 tw32(TG3PCI_PCISTATE, val);
5958         }
5959
5960         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5961                 /* Enable some hw fixes.  */
5962                 val = tr32(TG3PCI_MSI_DATA);
5963                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5964                 tw32(TG3PCI_MSI_DATA, val);
5965         }
5966
5967         /* Descriptor ring init may make accesses to the
5968          * NIC SRAM area to setup the TX descriptors, so we
5969          * can only do this after the hardware has been
5970          * successfully reset.
5971          */
5972         tg3_init_rings(tp);
5973
5974         /* This value is determined during the probe time DMA
5975          * engine test, tg3_test_dma.
5976          */
5977         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5978
5979         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5980                           GRC_MODE_4X_NIC_SEND_RINGS |
5981                           GRC_MODE_NO_TX_PHDR_CSUM |
5982                           GRC_MODE_NO_RX_PHDR_CSUM);
5983         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5984
5985         /* Pseudo-header checksum is done by hardware logic and not
5986          * the offload processers, so make the chip do the pseudo-
5987          * header checksums on receive.  For transmit it is more
5988          * convenient to do the pseudo-header checksum in software
5989          * as Linux does that on transmit for us in all cases.
5990          */
5991         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5992
5993         tw32(GRC_MODE,
5994              tp->grc_mode |
5995              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5996
5997         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
5998         val = tr32(GRC_MISC_CFG);
5999         val &= ~0xff;
6000         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6001         tw32(GRC_MISC_CFG, val);
6002
6003         /* Initialize MBUF/DESC pool. */
6004         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6005                 /* Do nothing.  */
6006         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6007                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6008                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6009                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6010                 else
6011                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6012                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6013                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6014         }
6015 #if TG3_TSO_SUPPORT != 0
6016         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6017                 int fw_len;
6018
6019                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6020                           TG3_TSO5_FW_RODATA_LEN +
6021                           TG3_TSO5_FW_DATA_LEN +
6022                           TG3_TSO5_FW_SBSS_LEN +
6023                           TG3_TSO5_FW_BSS_LEN);
6024                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6025                 tw32(BUFMGR_MB_POOL_ADDR,
6026                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6027                 tw32(BUFMGR_MB_POOL_SIZE,
6028                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6029         }
6030 #endif
6031
6032         if (tp->dev->mtu <= ETH_DATA_LEN) {
6033                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6034                      tp->bufmgr_config.mbuf_read_dma_low_water);
6035                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6036                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6037                 tw32(BUFMGR_MB_HIGH_WATER,
6038                      tp->bufmgr_config.mbuf_high_water);
6039         } else {
6040                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6041                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6042                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6043                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6044                 tw32(BUFMGR_MB_HIGH_WATER,
6045                      tp->bufmgr_config.mbuf_high_water_jumbo);
6046         }
6047         tw32(BUFMGR_DMA_LOW_WATER,
6048              tp->bufmgr_config.dma_low_water);
6049         tw32(BUFMGR_DMA_HIGH_WATER,
6050              tp->bufmgr_config.dma_high_water);
6051
6052         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6053         for (i = 0; i < 2000; i++) {
6054                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6055                         break;
6056                 udelay(10);
6057         }
6058         if (i >= 2000) {
6059                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6060                        tp->dev->name);
6061                 return -ENODEV;
6062         }
6063
6064         /* Setup replenish threshold. */
6065         val = tp->rx_pending / 8;
6066         if (val == 0)
6067                 val = 1;
6068         else if (val > tp->rx_std_max_post)
6069                 val = tp->rx_std_max_post;
6070
6071         tw32(RCVBDI_STD_THRESH, val);
6072
6073         /* Initialize TG3_BDINFO's at:
6074          *  RCVDBDI_STD_BD:     standard eth size rx ring
6075          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6076          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6077          *
6078          * like so:
6079          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6080          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6081          *                              ring attribute flags
6082          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6083          *
6084          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6085          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6086          *
6087          * The size of each ring is fixed in the firmware, but the location is
6088          * configurable.
6089          */
6090         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6091              ((u64) tp->rx_std_mapping >> 32));
6092         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6093              ((u64) tp->rx_std_mapping & 0xffffffff));
6094         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6095              NIC_SRAM_RX_BUFFER_DESC);
6096
6097         /* Don't even try to program the JUMBO/MINI buffer descriptor
6098          * configs on 5705.
6099          */
6100         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6101                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6102                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6103         } else {
6104                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6105                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6106
6107                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6108                      BDINFO_FLAGS_DISABLED);
6109
6110                 /* Setup replenish threshold. */
6111                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6112
6113                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6114                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6115                              ((u64) tp->rx_jumbo_mapping >> 32));
6116                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6117                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6118                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6119                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6120                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6121                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6122                 } else {
6123                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6124                              BDINFO_FLAGS_DISABLED);
6125                 }
6126
6127         }
6128
6129         /* There is only one send ring on 5705/5750, no need to explicitly
6130          * disable the others.
6131          */
6132         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6133                 /* Clear out send RCB ring in SRAM. */
6134                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6135                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6136                                       BDINFO_FLAGS_DISABLED);
6137         }
6138
6139         tp->tx_prod = 0;
6140         tp->tx_cons = 0;
6141         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6142         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6143
6144         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6145                        tp->tx_desc_mapping,
6146                        (TG3_TX_RING_SIZE <<
6147                         BDINFO_FLAGS_MAXLEN_SHIFT),
6148                        NIC_SRAM_TX_BUFFER_DESC);
6149
6150         /* There is only one receive return ring on 5705/5750, no need
6151          * to explicitly disable the others.
6152          */
6153         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6154                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6155                      i += TG3_BDINFO_SIZE) {
6156                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6157                                       BDINFO_FLAGS_DISABLED);
6158                 }
6159         }
6160
6161         tp->rx_rcb_ptr = 0;
6162         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6163
6164         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6165                        tp->rx_rcb_mapping,
6166                        (TG3_RX_RCB_RING_SIZE(tp) <<
6167                         BDINFO_FLAGS_MAXLEN_SHIFT),
6168                        0);
6169
6170         tp->rx_std_ptr = tp->rx_pending;
6171         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6172                      tp->rx_std_ptr);
6173
6174         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6175                                                 tp->rx_jumbo_pending : 0;
6176         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6177                      tp->rx_jumbo_ptr);
6178
6179         /* Initialize MAC address and backoff seed. */
6180         __tg3_set_mac_addr(tp);
6181
6182         /* MTU + ethernet header + FCS + optional VLAN tag */
6183         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6184
6185         /* The slot time is changed by tg3_setup_phy if we
6186          * run at gigabit with half duplex.
6187          */
6188         tw32(MAC_TX_LENGTHS,
6189              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6190              (6 << TX_LENGTHS_IPG_SHIFT) |
6191              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6192
6193         /* Receive rules. */
6194         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6195         tw32(RCVLPC_CONFIG, 0x0181);
6196
6197         /* Calculate RDMAC_MODE setting early, we need it to determine
6198          * the RCVLPC_STATE_ENABLE mask.
6199          */
6200         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6201                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6202                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6203                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6204                       RDMAC_MODE_LNGREAD_ENAB);
6205         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6206                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6207
6208         /* If statement applies to 5705 and 5750 PCI devices only */
6209         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6210              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6211             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6212                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6213                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6214                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6215                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6216                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6217                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6218                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6219                 }
6220         }
6221
6222         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6223                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6224
6225 #if TG3_TSO_SUPPORT != 0
6226         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6227                 rdmac_mode |= (1 << 27);
6228 #endif
6229
6230         /* Receive/send statistics. */
6231         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6232                 val = tr32(RCVLPC_STATS_ENABLE);
6233                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6234                 tw32(RCVLPC_STATS_ENABLE, val);
6235         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6236                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6237                 val = tr32(RCVLPC_STATS_ENABLE);
6238                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6239                 tw32(RCVLPC_STATS_ENABLE, val);
6240         } else {
6241                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6242         }
6243         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6244         tw32(SNDDATAI_STATSENAB, 0xffffff);
6245         tw32(SNDDATAI_STATSCTRL,
6246              (SNDDATAI_SCTRL_ENABLE |
6247               SNDDATAI_SCTRL_FASTUPD));
6248
6249         /* Setup host coalescing engine. */
6250         tw32(HOSTCC_MODE, 0);
6251         for (i = 0; i < 2000; i++) {
6252                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6253                         break;
6254                 udelay(10);
6255         }
6256
6257         __tg3_set_coalesce(tp, &tp->coal);
6258
6259         /* set status block DMA address */
6260         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6261              ((u64) tp->status_mapping >> 32));
6262         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6263              ((u64) tp->status_mapping & 0xffffffff));
6264
6265         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6266                 /* Status/statistics block address.  See tg3_timer,
6267                  * the tg3_periodic_fetch_stats call there, and
6268                  * tg3_get_stats to see how this works for 5705/5750 chips.
6269                  */
6270                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6271                      ((u64) tp->stats_mapping >> 32));
6272                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6273                      ((u64) tp->stats_mapping & 0xffffffff));
6274                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6275                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6276         }
6277
6278         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6279
6280         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6281         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6282         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6283                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6284
6285         /* Clear statistics/status block in chip, and status block in ram. */
6286         for (i = NIC_SRAM_STATS_BLK;
6287              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6288              i += sizeof(u32)) {
6289                 tg3_write_mem(tp, i, 0);
6290                 udelay(40);
6291         }
6292         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6293
6294         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6295                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6296                 /* reset to prevent losing 1st rx packet intermittently */
6297                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6298                 udelay(10);
6299         }
6300
6301         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6302                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6303         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6304         udelay(40);
6305
6306         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6307          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6308          * register to preserve the GPIO settings for LOMs. The GPIOs,
6309          * whether used as inputs or outputs, are set by boot code after
6310          * reset.
6311          */
6312         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6313                 u32 gpio_mask;
6314
6315                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6316                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6317
6318                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6319                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6320                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6321
6322                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6323                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6324
6325                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6326
6327                 /* GPIO1 must be driven high for eeprom write protect */
6328                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6329                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6330         }
6331         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6332         udelay(100);
6333
6334         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6335         tp->last_tag = 0;
6336
6337         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6338                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6339                 udelay(40);
6340         }
6341
6342         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6343                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6344                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6345                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6346                WDMAC_MODE_LNGREAD_ENAB);
6347
6348         /* If statement applies to 5705 and 5750 PCI devices only */
6349         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6350              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6351             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6352                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6353                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6354                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6355                         /* nothing */
6356                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6357                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6358                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6359                         val |= WDMAC_MODE_RX_ACCEL;
6360                 }
6361         }
6362
6363         /* Enable host coalescing bug fix */
6364         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6365             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6366                 val |= (1 << 29);
6367
6368         tw32_f(WDMAC_MODE, val);
6369         udelay(40);
6370
6371         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6372                 val = tr32(TG3PCI_X_CAPS);
6373                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6374                         val &= ~PCIX_CAPS_BURST_MASK;
6375                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6376                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6377                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6378                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6379                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6380                                 val |= (tp->split_mode_max_reqs <<
6381                                         PCIX_CAPS_SPLIT_SHIFT);
6382                 }
6383                 tw32(TG3PCI_X_CAPS, val);
6384         }
6385
6386         tw32_f(RDMAC_MODE, rdmac_mode);
6387         udelay(40);
6388
6389         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6390         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6391                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6392         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6393         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6394         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6395         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6396         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6397 #if TG3_TSO_SUPPORT != 0
6398         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6399                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6400 #endif
6401         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6402         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6403
6404         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6405                 err = tg3_load_5701_a0_firmware_fix(tp);
6406                 if (err)
6407                         return err;
6408         }
6409
6410 #if TG3_TSO_SUPPORT != 0
6411         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6412                 err = tg3_load_tso_firmware(tp);
6413                 if (err)
6414                         return err;
6415         }
6416 #endif
6417
6418         tp->tx_mode = TX_MODE_ENABLE;
6419         tw32_f(MAC_TX_MODE, tp->tx_mode);
6420         udelay(100);
6421
6422         tp->rx_mode = RX_MODE_ENABLE;
6423         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6424                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6425
6426         tw32_f(MAC_RX_MODE, tp->rx_mode);
6427         udelay(10);
6428
6429         if (tp->link_config.phy_is_low_power) {
6430                 tp->link_config.phy_is_low_power = 0;
6431                 tp->link_config.speed = tp->link_config.orig_speed;
6432                 tp->link_config.duplex = tp->link_config.orig_duplex;
6433                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6434         }
6435
6436         tp->mi_mode = MAC_MI_MODE_BASE;
6437         tw32_f(MAC_MI_MODE, tp->mi_mode);
6438         udelay(80);
6439
6440         tw32(MAC_LED_CTRL, tp->led_ctrl);
6441
6442         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6443         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6444                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6445                 udelay(10);
6446         }
6447         tw32_f(MAC_RX_MODE, tp->rx_mode);
6448         udelay(10);
6449
6450         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6451                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6452                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6453                         /* Set drive transmission level to 1.2V  */
6454                         /* only if the signal pre-emphasis bit is not set  */
6455                         val = tr32(MAC_SERDES_CFG);
6456                         val &= 0xfffff000;
6457                         val |= 0x880;
6458                         tw32(MAC_SERDES_CFG, val);
6459                 }
6460                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6461                         tw32(MAC_SERDES_CFG, 0x616000);
6462         }
6463
6464         /* Prevent chip from dropping frames when flow control
6465          * is enabled.
6466          */
6467         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6468
6469         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6470             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6471                 /* Use hardware link auto-negotiation */
6472                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6473         }
6474
6475         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6476             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6477                 u32 tmp;
6478
6479                 tmp = tr32(SERDES_RX_CTRL);
6480                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6481                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6482                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6483                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6484         }
6485
6486         err = tg3_setup_phy(tp, reset_phy);
6487         if (err)
6488                 return err;
6489
6490         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6491                 u32 tmp;
6492
6493                 /* Clear CRC stats. */
6494                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6495                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6496                         tg3_readphy(tp, 0x14, &tmp);
6497                 }
6498         }
6499
6500         __tg3_set_rx_mode(tp->dev);
6501
6502         /* Initialize receive rules. */
6503         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6504         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6505         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6506         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6507
6508         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6509             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6510                 limit = 8;
6511         else
6512                 limit = 16;
6513         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6514                 limit -= 4;
6515         switch (limit) {
6516         case 16:
6517                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6518         case 15:
6519                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6520         case 14:
6521                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6522         case 13:
6523                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6524         case 12:
6525                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6526         case 11:
6527                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6528         case 10:
6529                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6530         case 9:
6531                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6532         case 8:
6533                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6534         case 7:
6535                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6536         case 6:
6537                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6538         case 5:
6539                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6540         case 4:
6541                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6542         case 3:
6543                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6544         case 2:
6545         case 1:
6546
6547         default:
6548                 break;
6549         };
6550
6551         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6552
6553         return 0;
6554 }
6555
6556 /* Called at device open time to get the chip ready for
6557  * packet processing.  Invoked with tp->lock held.
6558  */
6559 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6560 {
6561         int err;
6562
6563         /* Force the chip into D0. */
6564         err = tg3_set_power_state(tp, PCI_D0);
6565         if (err)
6566                 goto out;
6567
6568         tg3_switch_clocks(tp);
6569
6570         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6571
6572         err = tg3_reset_hw(tp, reset_phy);
6573
6574 out:
6575         return err;
6576 }
6577
/* Fold the 32-bit hardware counter register REG into the 64-bit software
 * counter PSTAT.  The low word is accumulated; if the addition wraps
 * (unsigned overflow detected by "new low < addend"), carry one into the
 * high word.  Multi-statement macro wrapped in do/while(0) so it behaves
 * as a single statement after if/else.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
6584
/* Accumulate the chip's 32-bit MAC TX/RX and receive-list-placement
 * statistics registers into the driver's 64-bit counters in tp->hw_stats.
 * Invoked from tg3_timer() under tp->lock.  Skipped while the link is
 * down, when the counters are not advancing.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        if (!netif_carrier_ok(tp->dev))
                return;

        /* Transmit-side MAC statistics. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* Receive-side MAC statistics. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        /* Receive list placement: BD starvation and discard counters. */
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6625
/* Periodic maintenance timer, rearmed every tp->timer_offset jiffies.
 * Each tick it works around the non-tagged-status interrupt race and
 * checks the write DMA engine; once per second (timer_counter) it polls
 * link state and fetches statistics; once per two seconds (asf_counter)
 * it sends the ASF firmware heartbeat.  Takes only tp->lock, so it must
 * not sleep.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* Interrupt synchronization in progress: skip all work, just rearm. */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block pending: force an interrupt via GPIO. */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Kick the coalescer so it pushes a fresh status block. */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        /* Write DMA engine stopped unexpectedly: hand off a
                         * full chip reset to the workqueue.  We return without
                         * rearming; TG3_FLG2_RESTART_TIMER presumably tells
                         * the reset task to restart us -- confirm in reset_task.
                         */
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        /* Poll link state via the MAC status register. */
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Link was up but the PCS link state changed... */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        /* ...or link was down and a signal/sync appeared. */
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                /* Bounce the port mode to re-trigger autoneg. */
                                tw32_f(MAC_MODE,
                                     (tp->mac_mode &
                                      ~MAC_MODE_PORT_MODE_MASK));
                                udelay(40);
                                tw32_f(MAC_MODE, tp->mac_mode);
                                udelay(40);
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.  */
        if (!--tp->asf_counter) {
                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                        u32 val;

                        /* Tell the ASF firmware the driver is alive. */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE2);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
                        /* NOTE(review): bit 14 presumably signals the RX CPU
                         * that a firmware command is pending -- confirm against
                         * the chip documentation.
                         */
                        val = tr32(GRC_RX_CPU_EVENT);
                        val |= (1 << 14);
                        tw32(GRC_RX_CPU_EVENT, val);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
6727
6728 static int tg3_request_irq(struct tg3 *tp)
6729 {
6730         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6731         unsigned long flags;
6732         struct net_device *dev = tp->dev;
6733
6734         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6735                 fn = tg3_msi;
6736                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6737                         fn = tg3_msi_1shot;
6738                 flags = IRQF_SAMPLE_RANDOM;
6739         } else {
6740                 fn = tg3_interrupt;
6741                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6742                         fn = tg3_interrupt_tagged;
6743                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6744         }
6745         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6746 }
6747
6748 static int tg3_test_interrupt(struct tg3 *tp)
6749 {
6750         struct net_device *dev = tp->dev;
6751         int err, i;
6752         u32 int_mbox = 0;
6753
6754         if (!netif_running(dev))
6755                 return -ENODEV;
6756
6757         tg3_disable_ints(tp);
6758
6759         free_irq(tp->pdev->irq, dev);
6760
6761         err = request_irq(tp->pdev->irq, tg3_test_isr,
6762                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6763         if (err)
6764                 return err;
6765
6766         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6767         tg3_enable_ints(tp);
6768
6769         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6770                HOSTCC_MODE_NOW);
6771
6772         for (i = 0; i < 5; i++) {
6773                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6774                                         TG3_64BIT_REG_LOW);
6775                 if (int_mbox != 0)
6776                         break;
6777                 msleep(10);
6778         }
6779
6780         tg3_disable_ints(tp);
6781
6782         free_irq(tp->pdev->irq, dev);
6783         
6784         err = tg3_request_irq(tp);
6785
6786         if (err)
6787                 return err;
6788
6789         if (int_mbox != 0)
6790                 return 0;
6791
6792         return -EIO;
6793 }
6794
6795 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6796  * successfully restored
6797  */
6798 static int tg3_test_msi(struct tg3 *tp)
6799 {
6800         struct net_device *dev = tp->dev;
6801         int err;
6802         u16 pci_cmd;
6803
6804         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6805                 return 0;
6806
6807         /* Turn off SERR reporting in case MSI terminates with Master
6808          * Abort.
6809          */
6810         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6811         pci_write_config_word(tp->pdev, PCI_COMMAND,
6812                               pci_cmd & ~PCI_COMMAND_SERR);
6813
6814         err = tg3_test_interrupt(tp);
6815
6816         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6817
6818         if (!err)
6819                 return 0;
6820
6821         /* other failures */
6822         if (err != -EIO)
6823                 return err;
6824
6825         /* MSI test failed, go back to INTx mode */
6826         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6827                "switching to INTx mode. Please report this failure to "
6828                "the PCI maintainer and include system chipset information.\n",
6829                        tp->dev->name);
6830
6831         free_irq(tp->pdev->irq, dev);
6832         pci_disable_msi(tp->pdev);
6833
6834         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6835
6836         err = tg3_request_irq(tp);
6837         if (err)
6838                 return err;
6839
6840         /* Need to reset the chip because the MSI cycle may have terminated
6841          * with Master Abort.
6842          */
6843         tg3_full_lock(tp, 1);
6844
6845         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6846         err = tg3_init_hw(tp, 1);
6847
6848         tg3_full_unlock(tp);
6849
6850         if (err)
6851                 free_irq(tp->pdev->irq, dev);
6852
6853         return err;
6854 }
6855
6856 static int tg3_open(struct net_device *dev)
6857 {
6858         struct tg3 *tp = netdev_priv(dev);
6859         int err;
6860
6861         tg3_full_lock(tp, 0);
6862
6863         err = tg3_set_power_state(tp, PCI_D0);
6864         if (err)
6865                 return err;
6866
6867         tg3_disable_ints(tp);
6868         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6869
6870         tg3_full_unlock(tp);
6871
6872         /* The placement of this call is tied
6873          * to the setup and use of Host TX descriptors.
6874          */
6875         err = tg3_alloc_consistent(tp);
6876         if (err)
6877                 return err;
6878
6879         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6880             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6881             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6882             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6883               (tp->pdev_peer == tp->pdev))) {
6884                 /* All MSI supporting chips should support tagged
6885                  * status.  Assert that this is the case.
6886                  */
6887                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6888                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6889                                "Not using MSI.\n", tp->dev->name);
6890                 } else if (pci_enable_msi(tp->pdev) == 0) {
6891                         u32 msi_mode;
6892
6893                         msi_mode = tr32(MSGINT_MODE);
6894                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6895                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6896                 }
6897         }
6898         err = tg3_request_irq(tp);
6899
6900         if (err) {
6901                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6902                         pci_disable_msi(tp->pdev);
6903                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6904                 }
6905                 tg3_free_consistent(tp);
6906                 return err;
6907         }
6908
6909         tg3_full_lock(tp, 0);
6910
6911         err = tg3_init_hw(tp, 1);
6912         if (err) {
6913                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6914                 tg3_free_rings(tp);
6915         } else {
6916                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6917                         tp->timer_offset = HZ;
6918                 else
6919                         tp->timer_offset = HZ / 10;
6920
6921                 BUG_ON(tp->timer_offset > HZ);
6922                 tp->timer_counter = tp->timer_multiplier =
6923                         (HZ / tp->timer_offset);
6924                 tp->asf_counter = tp->asf_multiplier =
6925                         ((HZ / tp->timer_offset) * 2);
6926
6927                 init_timer(&tp->timer);
6928                 tp->timer.expires = jiffies + tp->timer_offset;
6929                 tp->timer.data = (unsigned long) tp;
6930                 tp->timer.function = tg3_timer;
6931         }
6932
6933         tg3_full_unlock(tp);
6934
6935         if (err) {
6936                 free_irq(tp->pdev->irq, dev);
6937                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6938                         pci_disable_msi(tp->pdev);
6939                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6940                 }
6941                 tg3_free_consistent(tp);
6942                 return err;
6943         }
6944
6945         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6946                 err = tg3_test_msi(tp);
6947
6948                 if (err) {
6949                         tg3_full_lock(tp, 0);
6950
6951                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6952                                 pci_disable_msi(tp->pdev);
6953                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6954                         }
6955                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6956                         tg3_free_rings(tp);
6957                         tg3_free_consistent(tp);
6958
6959                         tg3_full_unlock(tp);
6960
6961                         return err;
6962                 }
6963
6964                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6965                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6966                                 u32 val = tr32(0x7c04);
6967
6968                                 tw32(0x7c04, val | (1 << 29));
6969                         }
6970                 }
6971         }
6972
6973         tg3_full_lock(tp, 0);
6974
6975         add_timer(&tp->timer);
6976         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6977         tg3_enable_ints(tp);
6978
6979         tg3_full_unlock(tp);
6980
6981         netif_start_queue(dev);
6982
6983         return 0;
6984 }
6985
6986 #if 0
6987 /*static*/ void tg3_dump_state(struct tg3 *tp)
6988 {
6989         u32 val32, val32_2, val32_3, val32_4, val32_5;
6990         u16 val16;
6991         int i;
6992
6993         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6994         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6995         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6996                val16, val32);
6997
6998         /* MAC block */
6999         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7000                tr32(MAC_MODE), tr32(MAC_STATUS));
7001         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7002                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7003         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7004                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7005         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7006                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7007
7008         /* Send data initiator control block */
7009         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7010                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7011         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7012                tr32(SNDDATAI_STATSCTRL));
7013
7014         /* Send data completion control block */
7015         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7016
7017         /* Send BD ring selector block */
7018         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7019                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7020
7021         /* Send BD initiator control block */
7022         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7023                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7024
7025         /* Send BD completion control block */
7026         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7027
7028         /* Receive list placement control block */
7029         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7030                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7031         printk("       RCVLPC_STATSCTRL[%08x]\n",
7032                tr32(RCVLPC_STATSCTRL));
7033
7034         /* Receive data and receive BD initiator control block */
7035         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7036                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7037
7038         /* Receive data completion control block */
7039         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7040                tr32(RCVDCC_MODE));
7041
7042         /* Receive BD initiator control block */
7043         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7044                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7045
7046         /* Receive BD completion control block */
7047         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7048                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7049
7050         /* Receive list selector control block */
7051         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7052                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7053
7054         /* Mbuf cluster free block */
7055         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7056                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7057
7058         /* Host coalescing control block */
7059         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7060                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7061         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7062                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7063                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7064         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7065                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7066                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7067         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7068                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7069         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7070                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7071
7072         /* Memory arbiter control block */
7073         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7074                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7075
7076         /* Buffer manager control block */
7077         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7078                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7079         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7080                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7081         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7082                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7083                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7084                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7085
7086         /* Read DMA control block */
7087         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7088                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7089
7090         /* Write DMA control block */
7091         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7092                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7093
7094         /* DMA completion block */
7095         printk("DEBUG: DMAC_MODE[%08x]\n",
7096                tr32(DMAC_MODE));
7097
7098         /* GRC block */
7099         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7100                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7101         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7102                tr32(GRC_LOCAL_CTRL));
7103
7104         /* TG3_BDINFOs */
7105         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7106                tr32(RCVDBDI_JUMBO_BD + 0x0),
7107                tr32(RCVDBDI_JUMBO_BD + 0x4),
7108                tr32(RCVDBDI_JUMBO_BD + 0x8),
7109                tr32(RCVDBDI_JUMBO_BD + 0xc));
7110         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7111                tr32(RCVDBDI_STD_BD + 0x0),
7112                tr32(RCVDBDI_STD_BD + 0x4),
7113                tr32(RCVDBDI_STD_BD + 0x8),
7114                tr32(RCVDBDI_STD_BD + 0xc));
7115         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7116                tr32(RCVDBDI_MINI_BD + 0x0),
7117                tr32(RCVDBDI_MINI_BD + 0x4),
7118                tr32(RCVDBDI_MINI_BD + 0x8),
7119                tr32(RCVDBDI_MINI_BD + 0xc));
7120
7121         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7122         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7123         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7124         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7125         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7126                val32, val32_2, val32_3, val32_4);
7127
7128         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7129         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7130         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7131         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7132         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7133                val32, val32_2, val32_3, val32_4);
7134
7135         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7136         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7137         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7138         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7139         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7140         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7141                val32, val32_2, val32_3, val32_4, val32_5);
7142
7143         /* SW status block */
7144         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7145                tp->hw_status->status,
7146                tp->hw_status->status_tag,
7147                tp->hw_status->rx_jumbo_consumer,
7148                tp->hw_status->rx_consumer,
7149                tp->hw_status->rx_mini_consumer,
7150                tp->hw_status->idx[0].rx_producer,
7151                tp->hw_status->idx[0].tx_consumer);
7152
7153         /* SW statistics block */
7154         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7155                ((u32 *)tp->hw_stats)[0],
7156                ((u32 *)tp->hw_stats)[1],
7157                ((u32 *)tp->hw_stats)[2],
7158                ((u32 *)tp->hw_stats)[3]);
7159
7160         /* Mailboxes */
7161         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7162                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7163                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7164                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7165                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7166
7167         /* NIC side send descriptors. */
7168         for (i = 0; i < 6; i++) {
7169                 unsigned long txd;
7170
7171                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7172                         + (i * sizeof(struct tg3_tx_buffer_desc));
7173                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7174                        i,
7175                        readl(txd + 0x0), readl(txd + 0x4),
7176                        readl(txd + 0x8), readl(txd + 0xc));
7177         }
7178
7179         /* NIC side RX descriptors. */
7180         for (i = 0; i < 6; i++) {
7181                 unsigned long rxd;
7182
7183                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7184                         + (i * sizeof(struct tg3_rx_buffer_desc));
7185                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7186                        i,
7187                        readl(rxd + 0x0), readl(rxd + 0x4),
7188                        readl(rxd + 0x8), readl(rxd + 0xc));
7189                 rxd += (4 * sizeof(u32));
7190                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7191                        i,
7192                        readl(rxd + 0x0), readl(rxd + 0x4),
7193                        readl(rxd + 0x8), readl(rxd + 0xc));
7194         }
7195
7196         for (i = 0; i < 6; i++) {
7197                 unsigned long rxd;
7198
7199                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7200                         + (i * sizeof(struct tg3_rx_buffer_desc));
7201                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7202                        i,
7203                        readl(rxd + 0x0), readl(rxd + 0x4),
7204                        readl(rxd + 0x8), readl(rxd + 0xc));
7205                 rxd += (4 * sizeof(u32));
7206                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7207                        i,
7208                        readl(rxd + 0x0), readl(rxd + 0x4),
7209                        readl(rxd + 0x8), readl(rxd + 0xc));
7210         }
7211 }
7212 #endif
7213
7214 static struct net_device_stats *tg3_get_stats(struct net_device *);
7215 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7216
/* tg3_close() - net_device stop (ifconfig down) handler.
 *
 * Tears down the device: waits for any in-flight reset task, quiesces the
 * TX queue and timer, halts the chip under the full lock, releases the
 * IRQ/MSI, snapshots the final statistics, frees DMA memory and finally
 * puts the chip into D3hot.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
                msleep(1);

        netif_stop_queue(dev);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
#if 0
        tg3_dump_state(tp);
#endif

        tg3_disable_ints(tp);

        /* Reset the chip and release the rings while still holding the
         * full lock, then drop the init/flow-control state flags.
         */
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &=
                ~(TG3_FLAG_INIT_COMPLETE |
                  TG3_FLAG_GOT_SERDES_FLOWCTL);

        tg3_full_unlock(tp);

        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Snapshot the final counters before tg3_free_consistent() frees
         * the hardware statistics block, so a later open reports
         * cumulative statistics.
         */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
7266
7267 static inline unsigned long get_stat64(tg3_stat64_t *val)
7268 {
7269         unsigned long ret;
7270
7271 #if (BITS_PER_LONG == 32)
7272         ret = val->low;
7273 #else
7274         ret = ((u64)val->high << 32) | ((u64)val->low);
7275 #endif
7276         return ret;
7277 }
7278
/* calc_crc_errors() - return the cumulative RX CRC/FCS error count.
 *
 * On 5700/5701 copper devices the count is accumulated in software from a
 * PHY counter (registers 0x1e/0x14 — presumably a latched vendor-specific
 * counter; TODO confirm against the PHY documentation).  All other chips
 * report it directly through the hardware statistics block.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, 0x1e, &val)) {
                        /* Set bit 15 of reg 0x1e, then sample the counter
                         * from reg 0x14.  A failed read counts as zero.
                         */
                        tg3_writephy(tp, 0x1e, val | 0x8000);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                /* Accumulate the sample into the software running total. */
                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
7303
/* Accumulate one 64-bit hardware counter on top of the snapshot taken at
 * the last close (tp->estats_prev), storing the sum in tp->estats.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)

/* tg3_get_estats() - refresh and return the ethtool statistics block.
 *
 * Each field becomes the live hardware statistics value plus the total
 * preserved at the last close, so counters are cumulative across
 * open/close cycles.  If the hardware statistics block has not been
 * allocated (device never opened), the preserved snapshot is returned
 * unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        /* Receive-side MAC counters. */
        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        /* Transmit-side MAC counters. */
        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        /* Receive list placement state machine counters. */
        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        /* Send data initiator counters. */
        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        /* Host coalescing counters. */
        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        return estats;
}
7395
7396 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7397 {
7398         struct tg3 *tp = netdev_priv(dev);
7399         struct net_device_stats *stats = &tp->net_stats;
7400         struct net_device_stats *old_stats = &tp->net_stats_prev;
7401         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7402
7403         if (!hw_stats)
7404                 return old_stats;
7405
7406         stats->rx_packets = old_stats->rx_packets +
7407                 get_stat64(&hw_stats->rx_ucast_packets) +
7408                 get_stat64(&hw_stats->rx_mcast_packets) +
7409                 get_stat64(&hw_stats->rx_bcast_packets);
7410                 
7411         stats->tx_packets = old_stats->tx_packets +
7412                 get_stat64(&hw_stats->tx_ucast_packets) +
7413                 get_stat64(&hw_stats->tx_mcast_packets) +
7414                 get_stat64(&hw_stats->tx_bcast_packets);
7415
7416         stats->rx_bytes = old_stats->rx_bytes +
7417                 get_stat64(&hw_stats->rx_octets);
7418         stats->tx_bytes = old_stats->tx_bytes +
7419                 get_stat64(&hw_stats->tx_octets);
7420
7421         stats->rx_errors = old_stats->rx_errors +
7422                 get_stat64(&hw_stats->rx_errors);
7423         stats->tx_errors = old_stats->tx_errors +
7424                 get_stat64(&hw_stats->tx_errors) +
7425                 get_stat64(&hw_stats->tx_mac_errors) +
7426                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7427                 get_stat64(&hw_stats->tx_discards);
7428
7429         stats->multicast = old_stats->multicast +
7430                 get_stat64(&hw_stats->rx_mcast_packets);
7431         stats->collisions = old_stats->collisions +
7432                 get_stat64(&hw_stats->tx_collisions);
7433
7434         stats->rx_length_errors = old_stats->rx_length_errors +
7435                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7436                 get_stat64(&hw_stats->rx_undersize_packets);
7437
7438         stats->rx_over_errors = old_stats->rx_over_errors +
7439                 get_stat64(&hw_stats->rxbds_empty);
7440         stats->rx_frame_errors = old_stats->rx_frame_errors +
7441                 get_stat64(&hw_stats->rx_align_errors);
7442         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7443                 get_stat64(&hw_stats->tx_discards);
7444         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7445                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7446
7447         stats->rx_crc_errors = old_stats->rx_crc_errors +
7448                 calc_crc_errors(tp);
7449
7450         stats->rx_missed_errors = old_stats->rx_missed_errors +
7451                 get_stat64(&hw_stats->rx_discards);
7452
7453         return stats;
7454 }
7455
7456 static inline u32 calc_crc(unsigned char *buf, int len)
7457 {
7458         u32 reg;
7459         u32 tmp;
7460         int j, k;
7461
7462         reg = 0xffffffff;
7463
7464         for (j = 0; j < len; j++) {
7465                 reg ^= buf[j];
7466
7467                 for (k = 0; k < 8; k++) {
7468                         tmp = reg & 0x01;
7469
7470                         reg >>= 1;
7471
7472                         if (tmp) {
7473                                 reg ^= 0xedb88320;
7474                         }
7475                 }
7476         }
7477
7478         return ~reg;
7479 }
7480
7481 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7482 {
7483         /* accept or reject all multicast frames */
7484         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7485         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7486         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7487         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7488 }
7489
/* __tg3_set_rx_mode() - program the MAC RX filters from dev->flags and
 * the device multicast list.  Must be called with the full lock held
 * (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
#if TG3_VLAN_TAG_USED
        if (!tp->vlgrp &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
        /* By definition, VLAN is disabled always in this
         * case.
         */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi (tp, 1);
        } else if (dev->mc_count < 1) {
                /* Reject all multicast. */
                tg3_set_multi (tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                unsigned int i;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
                        /* The low 7 bits of the inverted CRC select one
                         * of 128 hash bits: bits 6:5 pick the register,
                         * bits 4:0 the bit within it.
                         */
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch the RX mode register when something changed. */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
7553
/* net_device set_multicast_list handler: apply the RX filter settings
 * under the full lock.  Nothing to do while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_rx_mode(dev);
                tg3_full_unlock(tp);
        }
}
7565
7566 #define TG3_REGDUMP_LEN         (32 * 1024)
7567
/* ethtool get_regs_len handler: the register dump is always the fixed
 * 32KB window (TG3_REGDUMP_LEN) mirrored by tg3_get_regs().
 */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REGDUMP_LEN;
}
7572
/* tg3_get_regs() - ethtool get_regs handler.
 *
 * Dumps selected register ranges into a zeroed TG3_REGDUMP_LEN buffer at
 * their native offsets (the buffer mirrors the register map), so any
 * unread gaps stay zero.  Nothing is read while the PHY is in the
 * low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p;
        struct tg3 *tp = netdev_priv(dev);
        u8 *orig_p = _p;
        int i;

        regs->version = 0;

        memset(p, 0, TG3_REGDUMP_LEN);

        if (tp->link_config.phy_is_low_power)
                return;

        tg3_full_lock(tp, 0);

/* __GET_REG32 copies one register; the LOOP/_1 wrappers first position
 * the output pointer at the register's own offset inside the dump.
 */
#define __GET_REG32(reg)        (*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)                \
do {    p = (u32 *)(orig_p + (base));           \
        for (i = 0; i < len; i += 4)            \
                __GET_REG32((base) + i);        \
} while (0)
#define GET_REG32_1(reg)                        \
do {    p = (u32 *)(orig_p + (reg));            \
        __GET_REG32((reg));                     \
} while (0)

        GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
        GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
        GET_REG32_LOOP(MAC_MODE, 0x4f0);
        GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
        GET_REG32_1(SNDDATAC_MODE);
        GET_REG32_LOOP(SNDBDS_MODE, 0x80);
        GET_REG32_LOOP(SNDBDI_MODE, 0x48);
        GET_REG32_1(SNDBDC_MODE);
        GET_REG32_LOOP(RCVLPC_MODE, 0x20);
        GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
        GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
        GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
        GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
        GET_REG32_1(RCVDCC_MODE);
        GET_REG32_LOOP(RCVBDI_MODE, 0x20);
        GET_REG32_LOOP(RCVCC_MODE, 0x14);
        GET_REG32_LOOP(RCVLSC_MODE, 0x08);
        GET_REG32_1(MBFREE_MODE);
        GET_REG32_LOOP(HOSTCC_MODE, 0x100);
        GET_REG32_LOOP(MEMARB_MODE, 0x10);
        GET_REG32_LOOP(BUFMGR_MODE, 0x58);
        GET_REG32_LOOP(RDMAC_MODE, 0x08);
        GET_REG32_LOOP(WDMAC_MODE, 0x08);
        GET_REG32_1(RX_CPU_MODE);
        GET_REG32_1(RX_CPU_STATE);
        GET_REG32_1(RX_CPU_PGMCTR);
        GET_REG32_1(RX_CPU_HWBKPT);
        GET_REG32_1(TX_CPU_MODE);
        GET_REG32_1(TX_CPU_STATE);
        GET_REG32_1(TX_CPU_PGMCTR);
        GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
        GET_REG32_LOOP(FTQ_RESET, 0x120);
        GET_REG32_LOOP(MSGINT_MODE, 0x0c);
        GET_REG32_1(DMAC_MODE);
        GET_REG32_LOOP(GRC_MODE, 0x4c);
        /* NVRAM registers are only present/valid when the chip has one. */
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

        tg3_full_unlock(tp);
}
7645
/* ethtool get_eeprom_len handler: report the probed NVRAM size. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
7652
7653 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7654 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7655
/* tg3_get_eeprom() - ethtool get_eeprom handler: read NVRAM into @data.
 *
 * NVRAM is accessed in 32-bit words, so the transfer is split into an
 * unaligned head, a whole-word middle and an unaligned tail; partial
 * words are read in full and only the requested bytes copied out.  Each
 * word is converted to little-endian before the byte-wise copy so byte
 * addressing is stable.  eeprom->len is advanced as bytes are produced,
 * so a partial count is reported on error.  Returns 0 on success,
 * -EAGAIN when the PHY is powered down, or the NVRAM read error code.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, val, b_offset, b_count;

        if (tp->link_config.phy_is_low_power)
                return -EAGAIN;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                ret = tg3_nvram_read(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                val = cpu_to_le32(val);
                memcpy(data, ((char*)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes upto the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read(tp, offset + i, &val);
                if (ret) {
                        /* Report how many bytes made it out before the
                         * failure.
                         */
                        eeprom->len += i;
                        return ret;
                }
                val = cpu_to_le32(val);
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read(tp, b_offset, &val);
                if (ret)
                        return ret;
                val = cpu_to_le32(val);
                memcpy(pd, ((char*)&val), b_count);
                eeprom->len += b_count;
        }
        return 0;
}
7717
7718 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7719
7720 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7721 {
7722         struct tg3 *tp = netdev_priv(dev);
7723         int ret;
7724         u32 offset, len, b_offset, odd_len, start, end;
7725         u8 *buf;
7726
7727         if (tp->link_config.phy_is_low_power)
7728                 return -EAGAIN;
7729
7730         if (eeprom->magic != TG3_EEPROM_MAGIC)
7731                 return -EINVAL;
7732
7733         offset = eeprom->offset;
7734         len = eeprom->len;
7735
7736         if ((b_offset = (offset & 3))) {
7737                 /* adjustments to start on required 4 byte boundary */
7738                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7739                 if (ret)
7740                         return ret;
7741                 start = cpu_to_le32(start);
7742                 len += b_offset;
7743                 offset &= ~3;
7744                 if (len < 4)
7745                         len = 4;
7746         }
7747
7748         odd_len = 0;
7749         if (len & 3) {
7750                 /* adjustments to end on required 4 byte boundary */
7751                 odd_len = 1;
7752                 len = (len + 3) & ~3;
7753                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7754                 if (ret)
7755                         return ret;
7756                 end = cpu_to_le32(end);
7757         }
7758
7759         buf = data;
7760         if (b_offset || odd_len) {
7761                 buf = kmalloc(len, GFP_KERNEL);
7762                 if (buf == 0)
7763                         return -ENOMEM;
7764                 if (b_offset)
7765                         memcpy(buf, &start, 4);
7766                 if (odd_len)
7767                         memcpy(buf+len-4, &end, 4);
7768                 memcpy(buf + b_offset, data, eeprom->len);
7769         }
7770
7771         ret = tg3_nvram_write_block(tp, offset, len, buf);
7772
7773         if (buf != data)
7774                 kfree(buf);
7775
7776         return ret;
7777 }
7778
/* tg3_get_settings() - ethtool get_settings handler.
 *
 * Reports supported link modes from the device flags (10/100-only parts
 * drop the gigabit modes; serdes parts report fibre instead of
 * copper/MII), the configured advertising mask and autoneg mode, and —
 * only while the interface is running — the active speed/duplex.
 * Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);

        cmd->supported = (SUPPORTED_Autoneg);

        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
                cmd->supported |= (SUPPORTED_1000baseT_Half |
                                   SUPPORTED_1000baseT_Full);

        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                cmd->supported |= (SUPPORTED_100baseT_Half |
                                  SUPPORTED_100baseT_Full |
                                  SUPPORTED_10baseT_Half |
                                  SUPPORTED_10baseT_Full |
                                  SUPPORTED_MII);
                cmd->port = PORT_TP;
        } else {
                cmd->supported |= SUPPORTED_FIBRE;
                cmd->port = PORT_FIBRE;
        }

        cmd->advertising = tp->link_config.advertising;
        if (netif_running(dev)) {
                /* Active speed/duplex are only valid while up. */
                cmd->speed = tp->link_config.active_speed;
                cmd->duplex = tp->link_config.active_duplex;
        }
        cmd->phy_address = PHY_ADDR;
        cmd->transceiver = 0;
        cmd->autoneg = tp->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
7813   
7814 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7815 {
7816         struct tg3 *tp = netdev_priv(dev);
7817   
7818         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7819                 /* These are the only valid advertisement bits allowed.  */
7820                 if (cmd->autoneg == AUTONEG_ENABLE &&
7821                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7822                                           ADVERTISED_1000baseT_Full |
7823                                           ADVERTISED_Autoneg |
7824                                           ADVERTISED_FIBRE)))
7825                         return -EINVAL;
7826                 /* Fiber can only do SPEED_1000.  */
7827                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7828                          (cmd->speed != SPEED_1000))
7829                         return -EINVAL;
7830         /* Copper cannot force SPEED_1000.  */
7831         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7832                    (cmd->speed == SPEED_1000))
7833                 return -EINVAL;
7834         else if ((cmd->speed == SPEED_1000) &&
7835                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7836                 return -EINVAL;
7837
7838         tg3_full_lock(tp, 0);
7839
7840         tp->link_config.autoneg = cmd->autoneg;
7841         if (cmd->autoneg == AUTONEG_ENABLE) {
7842                 tp->link_config.advertising = cmd->advertising;
7843                 tp->link_config.speed = SPEED_INVALID;
7844                 tp->link_config.duplex = DUPLEX_INVALID;
7845         } else {
7846                 tp->link_config.advertising = 0;
7847                 tp->link_config.speed = cmd->speed;
7848                 tp->link_config.duplex = cmd->duplex;
7849         }
7850   
7851         if (netif_running(dev))
7852                 tg3_setup_phy(tp, 1);
7853
7854         tg3_full_unlock(tp);
7855   
7856         return 0;
7857 }
7858   
/* ethtool get_drvinfo handler: report driver name, version, firmware
 * version and PCI bus address.
 * NOTE(review): plain strcpy() assumes each source fits the fixed-size
 * ethtool_drvinfo fields — worth confirming, especially for fw_ver.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct tg3 *tp = netdev_priv(dev);

        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
        strcpy(info->fw_version, tp->fw_ver);
        strcpy(info->bus_info, pci_name(tp->pdev));
}
7868   
7869 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7870 {
7871         struct tg3 *tp = netdev_priv(dev);
7872   
7873         wol->supported = WAKE_MAGIC;
7874         wol->wolopts = 0;
7875         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7876                 wol->wolopts = WAKE_MAGIC;
7877         memset(&wol->sopass, 0, sizeof(wol->sopass));
7878 }
7879   
/* tg3_set_wol() - ethtool set_wol handler.
 *
 * Only magic-packet wake is supported, and on serdes PHYs only when the
 * hardware advertises serdes WoL capability.  This only records the
 * enable flag under tp->lock; presumably the hardware is armed later at
 * power-state transitions — confirm against tg3_set_power_state().
 * Returns 0 or -EINVAL.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct tg3 *tp = netdev_priv(dev);

        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
        if ((wol->wolopts & WAKE_MAGIC) &&
            tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
            !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
                return -EINVAL;

        spin_lock_bh(&tp->lock);
        if (wol->wolopts & WAKE_MAGIC)
                tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
        else
                tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
        spin_unlock_bh(&tp->lock);

        return 0;
}
7900   
/* ethtool get_msglevel handler: return the driver message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        return tp->msg_enable;
}
7906   
/* ethtool set_msglevel handler: store the driver message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);
        tp->msg_enable = value;
}
7912   
7913 #if TG3_TSO_SUPPORT != 0
/* tg3_set_tso() - ethtool set_tso handler.
 *
 * Rejects enabling TSO on chips without TSO capability.  On HW_TSO_2
 * parts, IPv6 TSO (NETIF_F_TSO6) is toggled together with the generic
 * TSO feature bit, which ethtool_op_set_tso() then updates.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
                if (value)
                        return -EINVAL;
                return 0;
        }
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
                if (value)
                        dev->features |= NETIF_F_TSO6;
                else
                        dev->features &= ~NETIF_F_TSO6;
        }
        return ethtool_op_set_tso(dev, value);
}
7931 #endif
7932   
/* tg3_nway_reset() - ethtool nway_reset handler: restart autonegotiation.
 *
 * Only meaningful on a running, non-serdes device with autoneg enabled
 * (or while parallel detection is active).  Returns 0 on success,
 * -EAGAIN if the interface is down, -EINVAL otherwise.
 */
static int tg3_nway_reset(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 bmcr;
        int r;

        if (!netif_running(dev))
                return -EAGAIN;

        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                return -EINVAL;

        spin_lock_bh(&tp->lock);
        r = -EINVAL;
        /* NOTE(review): BMCR is read twice in a row; the first read looks
         * like a dummy read to flush latched PHY state — confirm it is
         * intentional and not a leftover.
         */
        tg3_readphy(tp, MII_BMCR, &bmcr);
        if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
            ((bmcr & BMCR_ANENABLE) ||
             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
                tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                           BMCR_ANENABLE);
                r = 0;
        }
        spin_unlock_bh(&tp->lock);

        return r;
}
7959   
7960 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7961 {
7962         struct tg3 *tp = netdev_priv(dev);
7963   
7964         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7965         ering->rx_mini_max_pending = 0;
7966         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7967                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7968         else
7969                 ering->rx_jumbo_max_pending = 0;
7970
7971         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7972
7973         ering->rx_pending = tp->rx_pending;
7974         ering->rx_mini_pending = 0;
7975         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7976                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7977         else
7978                 ering->rx_jumbo_pending = 0;
7979
7980         ering->tx_pending = tp->tx_pending;
7981 }
7982   
/* ethtool set_ringparam hook: resize the rx std, rx jumbo and tx rings.
 * If the device is running it is fully stopped first, the new sizes are
 * recorded, then the hardware is reset and reprogrammed with them.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct tg3 *tp = netdev_priv(dev);
        int irq_sync = 0, err = 0;
  
        /* Reject sizes beyond what the descriptor rings can hold. */
        if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
            (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
            (ering->tx_pending > TG3_TX_RING_SIZE - 1))
                return -EINVAL;
  
        if (netif_running(dev)) {
                tg3_netif_stop(tp);
                irq_sync = 1;   /* tg3_full_lock must also sync the irq */
        }

        tg3_full_lock(tp, irq_sync);
  
        tp->rx_pending = ering->rx_pending;

        /* Some chips cap the standard rx ring at 64 entries. */
        if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;
        tp->tx_pending = ering->tx_pending;

        if (netif_running(dev)) {
                /* Reset and re-init so the new ring sizes take effect;
                 * restart the stack only if the hardware came back up.
                 */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);
  
        return err;
}
8019   
8020 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8021 {
8022         struct tg3 *tp = netdev_priv(dev);
8023   
8024         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8025         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8026         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8027 }
8028   
/* ethtool set_pauseparam hook: record the new pause-frame settings and,
 * if the device is up, reset and reprogram the hardware so they take
 * effect.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
        struct tg3 *tp = netdev_priv(dev);
        int irq_sync = 0, err = 0;
  
        if (netif_running(dev)) {
                tg3_netif_stop(tp);
                irq_sync = 1;   /* tg3_full_lock must also sync the irq */
        }

        tg3_full_lock(tp, irq_sync);

        /* Mirror the requested settings into the driver flag word. */
        if (epause->autoneg)
                tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
        else
                tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
        if (epause->rx_pause)
                tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
        else
                tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
        if (epause->tx_pause)
                tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
        else
                tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

        if (netif_running(dev)) {
                /* Full reset so the new flow-control config is programmed;
                 * restart the stack only if the hardware came back up.
                 */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);
  
        return err;
}
8065   
8066 static u32 tg3_get_rx_csum(struct net_device *dev)
8067 {
8068         struct tg3 *tp = netdev_priv(dev);
8069         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8070 }
8071   
8072 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8073 {
8074         struct tg3 *tp = netdev_priv(dev);
8075   
8076         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8077                 if (data != 0)
8078                         return -EINVAL;
8079                 return 0;
8080         }
8081   
8082         spin_lock_bh(&tp->lock);
8083         if (data)
8084                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8085         else
8086                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8087         spin_unlock_bh(&tp->lock);
8088   
8089         return 0;
8090 }
8091   
8092 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8093 {
8094         struct tg3 *tp = netdev_priv(dev);
8095   
8096         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8097                 if (data != 0)
8098                         return -EINVAL;
8099                 return 0;
8100         }
8101   
8102         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8103             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8104                 ethtool_op_set_tx_hw_csum(dev, data);
8105         else
8106                 ethtool_op_set_tx_csum(dev, data);
8107
8108         return 0;
8109 }
8110
/* ethtool get_stats_count hook: number of u64 statistics exported. */
static int tg3_get_stats_count (struct net_device *dev)
{
        return TG3_NUM_STATS;
}
8115
/* ethtool self_test_count hook: number of self-test result slots. */
static int tg3_get_test_count (struct net_device *dev)
{
        return TG3_NUM_TEST;
}
8120
8121 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8122 {
8123         switch (stringset) {
8124         case ETH_SS_STATS:
8125                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8126                 break;
8127         case ETH_SS_TEST:
8128                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8129                 break;
8130         default:
8131                 WARN_ON(1);     /* we need a WARN() */
8132                 break;
8133         }
8134 }
8135
8136 static int tg3_phys_id(struct net_device *dev, u32 data)
8137 {
8138         struct tg3 *tp = netdev_priv(dev);
8139         int i;
8140
8141         if (!netif_running(tp->dev))
8142                 return -EAGAIN;
8143
8144         if (data == 0)
8145                 data = 2;
8146
8147         for (i = 0; i < (data * 2); i++) {
8148                 if ((i % 2) == 0)
8149                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8150                                            LED_CTRL_1000MBPS_ON |
8151                                            LED_CTRL_100MBPS_ON |
8152                                            LED_CTRL_10MBPS_ON |
8153                                            LED_CTRL_TRAFFIC_OVERRIDE |
8154                                            LED_CTRL_TRAFFIC_BLINK |
8155                                            LED_CTRL_TRAFFIC_LED);
8156         
8157                 else
8158                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8159                                            LED_CTRL_TRAFFIC_OVERRIDE);
8160
8161                 if (msleep_interruptible(500))
8162                         break;
8163         }
8164         tw32(MAC_LED_CTRL, tp->led_ctrl);
8165         return 0;
8166 }
8167
/* ethtool get_ethtool_stats hook: snapshot the driver-maintained hardware
 * statistics into the caller's buffer (TG3_NUM_STATS u64 entries).
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
{
        struct tg3 *tp = netdev_priv(dev);
        memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8174
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14

/* NVRAM self-test: read back the first block of NVRAM and validate its
 * checksums.  Standard images are CRC-checked at offsets 0x10 and 0xfc;
 * selfboot (format 1) images use a simple byte sum that must be zero.
 * Returns 0 on success, -EIO on read failure or checksum mismatch,
 * -ENOMEM if the scratch buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 *buf, csum, magic;
        int i, j, err = 0, size;

        if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return -EIO;

        /* Decide how much of the image to validate from its magic number. */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & 0xff000000) == 0xa5000000) {
                /* Selfboot image; only format 1 is checked here. */
                if ((magic & 0xe00000) == 0x200000)
                        size = NVRAM_SELFBOOT_FORMAT1_SIZE;
                else
                        return 0;
        } else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Pull the image into the buffer word by word, little-endian. */
        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                u32 val;

                if ((err = tg3_nvram_read(tp, i, &val)) != 0)
                        break;
                buf[j] = cpu_to_le32(val);
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                /* Byte-wise sum over the whole image must come out zero. */
                for (i = 0; i < size; i++)
                        csum8 += buf8[i];

                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if(csum != cpu_to_le32(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != cpu_to_le32(buf[0xfc/4]))
                 goto out;

        err = 0;

out:
        kfree(buf);
        return err;
}
8243
8244 #define TG3_SERDES_TIMEOUT_SEC  2
8245 #define TG3_COPPER_TIMEOUT_SEC  6
8246
8247 static int tg3_test_link(struct tg3 *tp)
8248 {
8249         int i, max;
8250
8251         if (!netif_running(tp->dev))
8252                 return -ENODEV;
8253
8254         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8255                 max = TG3_SERDES_TIMEOUT_SEC;
8256         else
8257                 max = TG3_COPPER_TIMEOUT_SEC;
8258
8259         for (i = 0; i < max; i++) {
8260                 if (netif_carrier_ok(tp->dev))
8261                         return 0;
8262
8263                 if (msleep_interruptible(1000))
8264                         break;
8265         }
8266
8267         return -EIO;
8268 }
8269
/* Only test the commonly used registers */
/* Register self-test: for every table entry applicable to this chip
 * class, write an all-zeros and then an all-ones pattern and verify
 * that read-only bits are preserved and read/write bits take the
 * written value.  The original register contents are restored after
 * each entry.  Returns 0 on success, -EIO (after logging the failing
 * offset) on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        /* Table of registers with masks of their read-only and
         * read/write bits; terminated by offset == 0xffff.
         */
        static struct {
                u16 offset;
                u16 flags;
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, 0x0000,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, 0x0000,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                is_5705 = 1;
        else
                is_5705 = 0;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that do not apply to this chip class. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                tw32(offset, save_val);
        }

        return 0;

out:
        printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
        tw32(offset, save_val);
        return -EIO;
}
8482
8483 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8484 {
8485         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8486         int i;
8487         u32 j;
8488
8489         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8490                 for (j = 0; j < len; j += 4) {
8491                         u32 val;
8492
8493                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8494                         tg3_read_mem(tp, offset + j, &val);
8495                         if (val != test_pattern[i])
8496                                 return -EIO;
8497                 }
8498         }
8499         return 0;
8500 }
8501
8502 static int tg3_test_memory(struct tg3 *tp)
8503 {
8504         static struct mem_entry {
8505                 u32 offset;
8506                 u32 len;
8507         } mem_tbl_570x[] = {
8508                 { 0x00000000, 0x00b50},
8509                 { 0x00002000, 0x1c000},
8510                 { 0xffffffff, 0x00000}
8511         }, mem_tbl_5705[] = {
8512                 { 0x00000100, 0x0000c},
8513                 { 0x00000200, 0x00008},
8514                 { 0x00004000, 0x00800},
8515                 { 0x00006000, 0x01000},
8516                 { 0x00008000, 0x02000},
8517                 { 0x00010000, 0x0e000},
8518                 { 0xffffffff, 0x00000}
8519         }, mem_tbl_5755[] = {
8520                 { 0x00000200, 0x00008},
8521                 { 0x00004000, 0x00800},
8522                 { 0x00006000, 0x00800},
8523                 { 0x00008000, 0x02000},
8524                 { 0x00010000, 0x0c000},
8525                 { 0xffffffff, 0x00000}
8526         };
8527         struct mem_entry *mem_tbl;
8528         int err = 0;
8529         int i;
8530
8531         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8532                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8533                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8534                         mem_tbl = mem_tbl_5755;
8535                 else
8536                         mem_tbl = mem_tbl_5705;
8537         } else
8538                 mem_tbl = mem_tbl_570x;
8539
8540         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8541                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8542                     mem_tbl[i].len)) != 0)
8543                         break;
8544         }
8545         
8546         return err;
8547 }
8548
#define TG3_MAC_LOOPBACK        0
#define TG3_PHY_LOOPBACK        1

/* Run one frame through the chip in MAC-internal or PHY loopback mode
 * and verify it comes back intact.  Builds a 1514-byte frame addressed
 * to ourselves, posts it on the tx ring, polls the status block for tx
 * completion and rx arrival, then byte-compares the received payload.
 * Returns 0 on success, -EIO on any mismatch or timeout, -ENOMEM if the
 * test skb cannot be allocated.  Assumes the caller has reset/quiesced
 * the hardware (see tg3_test_loopback).
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
        u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 desc_idx;
        struct sk_buff *skb, *rx_skb;
        u8 *tx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;

        if (loopback_mode == TG3_MAC_LOOPBACK) {
                /* HW errata - mac loopback fails in some cases on 5780.
                 * Normal traffic and PHY loopback are not affected by
                 * errata.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
                        return 0;

                mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
                           MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
                           MAC_MODE_PORT_MODE_GMII;
                tw32(MAC_MODE, mac_mode);
        } else if (loopback_mode == TG3_PHY_LOOPBACK) {
                /* Put the PHY into its internal loopback at gigabit speed. */
                tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
                                           BMCR_SPEED1000);
                udelay(40);
                /* reset to prevent losing 1st rx packet intermittently */
                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                        tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                        udelay(10);
                        tw32_f(MAC_RX_MODE, tp->rx_mode);
                }
                mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
                           MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
                if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                }
                tw32(MAC_MODE, mac_mode);
        }
        else
                return -EINVAL;

        err = -EIO;

        /* Build the test frame: our MAC as destination, zeroed source,
         * payload bytes set to their index modulo 256.
         */
        tx_len = 1514;
        skb = dev_alloc_skb(tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + 4);

        for (i = 14; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

        /* NOTE(review): HOSTCC_MODE_NOW presumably forces an immediate
         * status block update so the indices below are current -- verify.
         */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
             HOSTCC_MODE_NOW);

        udelay(10);

        rx_start_idx = tp->hw_status->idx[0].rx_producer;

        num_pkts = 0;

        /* Post the single tx descriptor and ring the doorbell. */
        tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

        tp->tx_prod++;
        num_pkts++;

        tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
                     tp->tx_prod);
        tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

        udelay(10);

        /* Poll (up to 10 x 10us plus coalesce kicks) for tx completion
         * and for the looped-back frame to show up on the rx ring.
         */
        for (i = 0; i < 10; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       HOSTCC_MODE_NOW);

                udelay(10);

                tx_idx = tp->hw_status->idx[0].tx_consumer;
                rx_idx = tp->hw_status->idx[0].rx_producer;
                if ((tx_idx == tp->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        if (tx_idx != tp->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        /* Validate the rx descriptor: must be on the standard ring,
         * error-free (odd-nibble is tolerated), and full length.
         */
        desc = &tp->rx_rcb[rx_start_idx];
        desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
        opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
        if (opaque_key != RXD_OPAQUE_RING_STD)
                goto out;

        if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
            (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                goto out;

        rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
        if (rx_len != tx_len)
                goto out;

        rx_skb = tp->rx_std_buffers[desc_idx].skb;

        map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
        pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

        /* Byte-compare the payload against the pattern we transmitted. */
        for (i = 14; i < tx_len; i++) {
                if (*(rx_skb->data + i) != (u8) (i & 0xff))
                        goto out;
        }
        err = 0;
        
        /* tg3_free_rings will unmap and free the rx_skb */
out:
        return err;
}
8685
8686 #define TG3_MAC_LOOPBACK_FAILED         1
8687 #define TG3_PHY_LOOPBACK_FAILED         2
8688 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8689                                          TG3_PHY_LOOPBACK_FAILED)
8690
8691 static int tg3_test_loopback(struct tg3 *tp)
8692 {
8693         int err = 0;
8694
8695         if (!netif_running(tp->dev))
8696                 return TG3_LOOPBACK_FAILED;
8697
8698         err = tg3_reset_hw(tp, 1);
8699         if (err)
8700                 return TG3_LOOPBACK_FAILED;
8701
8702         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8703                 err |= TG3_MAC_LOOPBACK_FAILED;
8704         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8705                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8706                         err |= TG3_PHY_LOOPBACK_FAILED;
8707         }
8708
8709         return err;
8710 }
8711
/* ethtool self_test hook.  The NVRAM and link tests always run; when
 * ETH_TEST_FL_OFFLINE is requested the device is halted and the
 * register, memory, loopback and interrupt tests run as well, after
 * which the hardware is reset and (if it was running) restarted.
 * data[i] is set nonzero for each failing test, and ETH_TEST_FL_FAILED
 * is set in etest->flags on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Wake the chip for the duration of the tests if it was asleep. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D0);

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        if (tg3_test_link(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_netif_stop(tp);
                        irq_sync = 1;   /* tg3_full_lock must also sync the irq */
                }

                tg3_full_lock(tp, irq_sync);

                /* Quiesce the chip and its internal CPUs before poking
                 * registers and memory directly.
                 */
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }
                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }
                /* data[4] holds the loopback failure bitmask, not just 0/1. */
                if ((data[4] = tg3_test_loopback(tp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* The interrupt test needs interrupts live, so drop the
                 * full lock around it.
                 */
                tg3_full_unlock(tp);

                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[5] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Reset and, if the interface was up, bring it back. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                        if (!tg3_restart_hw(tp, 1))
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);
        }
        /* Restore the low-power state we found the chip in. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D3hot);

}
8784
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 *
 * Proxies PHY register reads/writes through tg3_readphy()/tg3_writephy()
 * under tp->lock.  SERDES devices have no MII PHY, so those requests
 * fall through to the -EOPNOTSUPP return.  Returns -EAGAIN while the
 * PHY is powered down, -EPERM for unprivileged writes.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		/* Report our fixed PHY address, then read the register. */
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* MDIO access is not reliable while the chip is in a
		 * low-power state; tell the caller to retry later. */
		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Writing PHY registers can reconfigure the link. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
8836
8837 #if TG3_VLAN_TAG_USED
/* Install a new VLAN group pointer.
 *
 * The device is quiesced (tg3_netif_stop) and the full lock taken so
 * the RX path never sees a half-updated vlgrp, then the RX_MODE
 * register is refreshed because VLAN tag stripping depends on whether
 * a group is registered.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
8857
/* Remove one VLAN id from the registered group.
 *
 * Clears the per-vid device slot under the full lock with the device
 * quiesced, so the RX path cannot dereference the entry while it is
 * being torn down.
 */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
8873 #endif
8874
8875 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8876 {
8877         struct tg3 *tp = netdev_priv(dev);
8878
8879         memcpy(ec, &tp->coal, sizeof(*ec));
8880         return 0;
8881 }
8882
8883 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8884 {
8885         struct tg3 *tp = netdev_priv(dev);
8886         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8887         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8888
8889         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8890                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8891                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8892                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8893                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8894         }
8895
8896         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8897             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8898             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8899             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8900             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8901             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8902             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8903             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8904             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8905             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8906                 return -EINVAL;
8907
8908         /* No rx interrupts will be generated if both are zero */
8909         if ((ec->rx_coalesce_usecs == 0) &&
8910             (ec->rx_max_coalesced_frames == 0))
8911                 return -EINVAL;
8912
8913         /* No tx interrupts will be generated if both are zero */
8914         if ((ec->tx_coalesce_usecs == 0) &&
8915             (ec->tx_max_coalesced_frames == 0))
8916                 return -EINVAL;
8917
8918         /* Only copy relevant parameters, ignore all others. */
8919         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8920         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8921         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8922         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8923         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8924         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8925         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8926         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8927         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8928
8929         if (netif_running(dev)) {
8930                 tg3_full_lock(tp, 0);
8931                 __tg3_set_coalesce(tp, &tp->coal);
8932                 tg3_full_unlock(tp);
8933         }
8934         return 0;
8935 }
8936
/* ethtool method table, hooked up via dev->ethtool_ops at probe time.
 * Generic ethtool_op_* helpers are used where no chip-specific work is
 * needed; the TSO hooks are compiled in only when the kernel provides
 * NETIF_F_TSO (see TG3_TSO_SUPPORT at the top of the file). */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
8976
8977 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8978 {
8979         u32 cursize, val, magic;
8980
8981         tp->nvram_size = EEPROM_CHIP_SIZE;
8982
8983         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8984                 return;
8985
8986         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8987                 return;
8988
8989         /*
8990          * Size the chip by reading offsets at increasing powers of two.
8991          * When we encounter our validation signature, we know the addressing
8992          * has wrapped around, and thus have our chip size.
8993          */
8994         cursize = 0x10;
8995
8996         while (cursize < tp->nvram_size) {
8997                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8998                         return;
8999
9000                 if (val == magic)
9001                         break;
9002
9003                 cursize <<= 1;
9004         }
9005
9006         tp->nvram_size = cursize;
9007 }
9008                 
9009 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9010 {
9011         u32 val;
9012
9013         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9014                 return;
9015
9016         /* Selfboot format */
9017         if (val != TG3_EEPROM_MAGIC) {
9018                 tg3_get_eeprom_size(tp);
9019                 return;
9020         }
9021
9022         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9023                 if (val != 0) {
9024                         tp->nvram_size = (val >> 16) * 1024;
9025                         return;
9026                 }
9027         }
9028         tp->nvram_size = 0x20000;
9029 }
9030
/* Decode NVRAM_CFG1 for pre-5752 parts: record the JEDEC vendor, page
 * size, and buffered/flash flags that the read/write paths depend on.
 *
 * When no flash interface is strapped in, the COMPAT_BYPASS bit must be
 * cleared (and written back) so EEPROM-style accesses work.  The vendor
 * switch is only meaningful on 5750/5780-class chips; everything else
 * is assumed to be a buffered Atmel AT45DB0X1B part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Older chips: fixed buffered Atmel configuration. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9083
/* Decode NVRAM_CFG1 for 5752-class chips.
 *
 * Sets the JEDEC vendor and buffered/flash flags from the vendor field,
 * then the page size from the dedicated page-size field (flash parts
 * only).  EEPROM parts instead get a whole-chip "page" size and have
 * COMPAT_BYPASS cleared so EEPROM accesses work.  Bit 27 marks NVRAM
 * regions protected for the TPM.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9144
/* Decode NVRAM_CFG1 for 5755-class chips.
 *
 * Unlike the 5752 path, the page size is implied directly by the vendor
 * code here (264 bytes for Atmel flash, 256 for ST, whole-chip for
 * EEPROM parts).  EEPROM parts also have COMPAT_BYPASS cleared.  Bit 27
 * marks NVRAM regions protected for the TPM.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_4:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9185
/* Decode NVRAM_CFG1 for 5787-class chips.
 *
 * Same scheme as the 5755 path (vendor code implies the page size), but
 * with the 5787 EEPROM vendor codes added and no TPM-protection bit
 * handling.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9223
9224 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9225 static void __devinit tg3_nvram_init(struct tg3 *tp)
9226 {
9227         int j;
9228
9229         tw32_f(GRC_EEPROM_ADDR,
9230              (EEPROM_ADDR_FSM_RESET |
9231               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9232                EEPROM_ADDR_CLKPERD_SHIFT)));
9233
9234         /* XXX schedule_timeout() ... */
9235         for (j = 0; j < 100; j++)
9236                 udelay(10);
9237
9238         /* Enable seeprom accesses. */
9239         tw32_f(GRC_LOCAL_CTRL,
9240              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9241         udelay(100);
9242
9243         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9244             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9245                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9246
9247                 if (tg3_nvram_lock(tp)) {
9248                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9249                                "tg3_nvram_init failed.\n", tp->dev->name);
9250                         return;
9251                 }
9252                 tg3_enable_nvram_access(tp);
9253
9254                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9255                         tg3_get_5752_nvram_info(tp);
9256                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9257                         tg3_get_5755_nvram_info(tp);
9258                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9259                         tg3_get_5787_nvram_info(tp);
9260                 else
9261                         tg3_get_nvram_info(tp);
9262
9263                 tg3_get_nvram_size(tp);
9264
9265                 tg3_disable_nvram_access(tp);
9266                 tg3_nvram_unlock(tp);
9267
9268         } else {
9269                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9270
9271                 tg3_get_eeprom_size(tp);
9272         }
9273 }
9274
/* Read one 32-bit word via the legacy serial-EEPROM interface.
 *
 * Programs GRC_EEPROM_ADDR with the target offset plus READ|START and
 * polls (up to ~1s) for EEPROM_ADDR_COMPLETE, then fetches the word
 * from GRC_EEPROM_DATA.  Returns -EINVAL for an unaligned or
 * out-of-range offset, -EBUSY on poll timeout, 0 on success.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the non-address control bits while clearing the
	 * address, device-id and read fields. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9308
9309 #define NVRAM_CMD_TIMEOUT 10000
9310
/* Issue a command to the NVRAM controller and poll for completion.
 *
 * Writes nvram_cmd to NVRAM_CMD and busy-waits (10us steps, up to
 * NVRAM_CMD_TIMEOUT iterations) for NVRAM_CMD_DONE.  Returns -EBUSY on
 * timeout, 0 on success.  Caller must hold the NVRAM lock and have
 * access enabled.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			/* Small settle delay after DONE is observed. */
			udelay(10);
			break;
		}
	}
	if (i == NVRAM_CMD_TIMEOUT) {
		return -EBUSY;
	}
	return 0;
}
9328
9329 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9330 {
9331         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9332             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9333             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9334             (tp->nvram_jedecnum == JEDEC_ATMEL))
9335
9336                 addr = ((addr / tp->nvram_pagesize) <<
9337                         ATMEL_AT45DB0X1B_PAGE_POS) +
9338                        (addr % tp->nvram_pagesize);
9339
9340         return addr;
9341 }
9342
9343 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9344 {
9345         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9346             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9347             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9348             (tp->nvram_jedecnum == JEDEC_ATMEL))
9349
9350                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9351                         tp->nvram_pagesize) +
9352                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9353
9354         return addr;
9355 }
9356
/* Read one 32-bit word from NVRAM at a linear offset.
 *
 * Falls back to the legacy EEPROM path on chips without an NVRAM
 * interface.  Otherwise translates the offset to a physical address,
 * takes the NVRAM arbitration lock, enables access, and issues a
 * single-word read command; the data is byte-swapped out of
 * NVRAM_RDDATA.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9388
9389 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9390 {
9391         int err;
9392         u32 tmp;
9393
9394         err = tg3_nvram_read(tp, offset, &tmp);
9395         *val = swab32(tmp);
9396         return err;
9397 }
9398
/* Write a buffer via the legacy serial-EEPROM interface, one 32-bit
 * word at a time.
 *
 * For each word: load GRC_EEPROM_DATA, acknowledge any stale COMPLETE
 * bit, kick off a WRITE|START cycle, and poll (up to ~1s) for
 * completion.  offset/len are assumed dword-aligned by the caller.
 * Returns 0 on success, -EBUSY if a word times out (remaining words
 * are not written).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Writing COMPLETE back clears the stale status bit. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);
		
		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9440
9441 /* offset and length are dword aligned */
9442 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9443                 u8 *buf)
9444 {
9445         int ret = 0;
9446         u32 pagesize = tp->nvram_pagesize;
9447         u32 pagemask = pagesize - 1;
9448         u32 nvram_cmd;
9449         u8 *tmp;
9450
9451         tmp = kmalloc(pagesize, GFP_KERNEL);
9452         if (tmp == NULL)
9453                 return -ENOMEM;
9454
9455         while (len) {
9456                 int j;
9457                 u32 phy_addr, page_off, size;
9458
9459                 phy_addr = offset & ~pagemask;
9460         
9461                 for (j = 0; j < pagesize; j += 4) {
9462                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9463                                                 (u32 *) (tmp + j))))
9464                                 break;
9465                 }
9466                 if (ret)
9467                         break;
9468
9469                 page_off = offset & pagemask;
9470                 size = pagesize;
9471                 if (len < size)
9472                         size = len;
9473
9474                 len -= size;
9475
9476                 memcpy(tmp + page_off, buf, size);
9477
9478                 offset = offset + (pagesize - page_off);
9479
9480                 tg3_enable_nvram_access(tp);
9481
9482                 /*
9483                  * Before we can erase the flash page, we need
9484                  * to issue a special "write enable" command.
9485                  */
9486                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9487
9488                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9489                         break;
9490
9491                 /* Erase the target page */
9492                 tw32(NVRAM_ADDR, phy_addr);
9493
9494                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9495                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9496
9497                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9498                         break;
9499
9500                 /* Issue another write enable to start the write. */
9501                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9502
9503                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9504                         break;
9505
9506                 for (j = 0; j < pagesize; j += 4) {
9507                         u32 data;
9508
9509                         data = *((u32 *) (tmp + j));
9510                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9511
9512                         tw32(NVRAM_ADDR, phy_addr + j);
9513
9514                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9515                                 NVRAM_CMD_WR;
9516
9517                         if (j == 0)
9518                                 nvram_cmd |= NVRAM_CMD_FIRST;
9519                         else if (j == (pagesize - 4))
9520                                 nvram_cmd |= NVRAM_CMD_LAST;
9521
9522                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9523                                 break;
9524                 }
9525                 if (ret)
9526                         break;
9527         }
9528
9529         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9530         tg3_nvram_exec_cmd(tp, nvram_cmd);
9531
9532         kfree(tmp);
9533
9534         return ret;
9535 }
9536
/* offset and length are dword aligned */
/* Write to buffered flash or EEPROM, one 32-bit word per command.
 *
 * For each word the physical address is programmed and a WR command is
 * issued, with FIRST/LAST flags marking page boundaries and the ends of
 * the transfer.  ST parts on pre-5752 chips additionally need an
 * explicit write-enable at the start of each page.  EEPROM parts always
 * use complete FIRST|LAST word writes.  Caller holds the NVRAM lock
 * and has write access enabled.  Returns 0 or the first command error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* First word of a page, or first word overall. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Pre-5752 ST parts want a write-enable before each
		 * page is started. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9587
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily drops the hardware
 * write-protect GPIO if set, dispatches to the EEPROM, buffered, or
 * unbuffered write path, then restores write protection.  Returns 0
 * on success or a negative errno / command status on failure.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		/* De-assert the write-protect GPIO for the duration of
		 * the write.
		 */
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		/* Serialize against firmware access to the NVRAM. */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes in GRC mode for the duration. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		/* Restore the original write-protect GPIO state. */
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
9642
/* Maps a PCI subsystem vendor/device ID pair to the PHY chip known
 * to be fitted on that board design.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
9647
/* Per-board PHY ID table, consulted by lookup_by_subsys() as a fallback
 * when no usable PHY ID can be read from hardware or NVRAM.  A phy_id
 * of 0 denotes a serdes (fiber) board with no MII PHY attached.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9685
9686 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9687 {
9688         int i;
9689
9690         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9691                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9692                      tp->pdev->subsystem_vendor) &&
9693                     (subsys_id_to_phy_id[i].subsys_devid ==
9694                      tp->pdev->subsystem_device))
9695                         return &subsys_id_to_phy_id[i];
9696         }
9697         return NULL;
9698 }
9699
/* Read the hardware configuration that bootcode left in NIC SRAM
 * (mirrored from EEPROM/NVRAM) and translate it into driver state:
 * PHY ID, serdes vs copper, LED mode, write-protect, ASF and WOL flags.
 * Safe defaults are installed first in case no valid signature is found.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults, overridden below if a valid SRAM signature is found. */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer bootcode (version 0x01-0xff)
		 * and not on 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM-format PHY ID into the driver's
		 * internal PHY ID layout.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		/* 5750+ parts carry extended LED mode bits in CFG_2. */
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 need the PHY mode bits set as well. */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards wire the LED differently. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
		else
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
9851
/* Determine the PHY attached to this chip.  Tries, in order: the live
 * PHY ID registers (unless ASF firmware owns the PHY), the ID recorded
 * by tg3_get_eeprom_hw_cfg(), and finally the hardcoded subsystem-ID
 * table.  Also performs an initial PHY reset/autoneg setup for copper
 * PHYs and fixes up link_config.advertising.  Returns 0 or -errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firwmare access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Repack MII PHYSID1/2 into the driver's PHY ID layout. */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* phy_id == 0 in the table marks a serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY, and ASF not in control: reset the PHY and set up
	 * autonegotiation, unless the link is already up.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* BMSR is read twice because link status is latched. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 must advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* Only restart autoneg if the current advertisement is
		 * incomplete.
		 */
		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this second DSP init runs right after a successful
	 * first one and looks redundant — confirm whether it is intentional.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
9974
9975 static void __devinit tg3_read_partno(struct tg3 *tp)
9976 {
9977         unsigned char vpd_data[256];
9978         int i;
9979         u32 magic;
9980
9981         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9982                 goto out_not_found;
9983
9984         if (magic == TG3_EEPROM_MAGIC) {
9985                 for (i = 0; i < 256; i += 4) {
9986                         u32 tmp;
9987
9988                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9989                                 goto out_not_found;
9990
9991                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9992                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9993                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9994                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9995                 }
9996         } else {
9997                 int vpd_cap;
9998
9999                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10000                 for (i = 0; i < 256; i += 4) {
10001                         u32 tmp, j = 0;
10002                         u16 tmp16;
10003
10004                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10005                                               i);
10006                         while (j++ < 100) {
10007                                 pci_read_config_word(tp->pdev, vpd_cap +
10008                                                      PCI_VPD_ADDR, &tmp16);
10009                                 if (tmp16 & 0x8000)
10010                                         break;
10011                                 msleep(1);
10012                         }
10013                         if (!(tmp16 & 0x8000))
10014                                 goto out_not_found;
10015
10016                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10017                                               &tmp);
10018                         tmp = cpu_to_le32(tmp);
10019                         memcpy(&vpd_data[i], &tmp, 4);
10020                 }
10021         }
10022
10023         /* Now parse and find the part number. */
10024         for (i = 0; i < 256; ) {
10025                 unsigned char val = vpd_data[i];
10026                 int block_end;
10027
10028                 if (val == 0x82 || val == 0x91) {
10029                         i = (i + 3 +
10030                              (vpd_data[i + 1] +
10031                               (vpd_data[i + 2] << 8)));
10032                         continue;
10033                 }
10034
10035                 if (val != 0x90)
10036                         goto out_not_found;
10037
10038                 block_end = (i + 3 +
10039                              (vpd_data[i + 1] +
10040                               (vpd_data[i + 2] << 8)));
10041                 i += 3;
10042                 while (i < block_end) {
10043                         if (vpd_data[i + 0] == 'P' &&
10044                             vpd_data[i + 1] == 'N') {
10045                                 int partno_len = vpd_data[i + 2];
10046
10047                                 if (partno_len > 24)
10048                                         goto out_not_found;
10049
10050                                 memcpy(tp->board_part_number,
10051                                        &vpd_data[i + 3],
10052                                        partno_len);
10053
10054                                 /* Success. */
10055                                 return;
10056                         }
10057                 }
10058
10059                 /* Part number not found. */
10060                 goto out_not_found;
10061         }
10062
10063 out_not_found:
10064         strcpy(tp->board_part_number, "none");
10065 }
10066
/* Read the bootcode firmware version string from NVRAM into tp->fw_ver.
 * Silently returns (leaving fw_ver untouched) if the NVRAM lacks the
 * tg3 EEPROM magic or any read fails.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc holds the directory offset, word 0x4 the image start. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	/* 0x0c000000 in the top bits identifies a bootcode image header. */
	if ((val & 0xfc000000) == 0x0c000000) {
		u32 ver_offset, addr;
		int i;

		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
			return;

		if (val != 0)
			return;

		/* Copy the 16-byte version string a word at a time. */
		addr = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (tg3_nvram_read(tp, addr + i, &val))
				return;

			val = cpu_to_le32(val);
			memcpy(tp->fw_ver + i, &val, 4);
		}
	}
}
10106
10107 static int __devinit tg3_get_invariants(struct tg3 *tp)
10108 {
10109         static struct pci_device_id write_reorder_chipsets[] = {
10110                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10111                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10112                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10113                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10114                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10115                              PCI_DEVICE_ID_VIA_8385_0) },
10116                 { },
10117         };
10118         u32 misc_ctrl_reg;
10119         u32 cacheline_sz_reg;
10120         u32 pci_state_reg, grc_misc_cfg;
10121         u32 val;
10122         u16 pci_cmd;
10123         int err;
10124
10125         /* Force memory write invalidate off.  If we leave it on,
10126          * then on 5700_BX chips we have to enable a workaround.
10127          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10128          * to match the cacheline size.  The Broadcom driver have this
10129          * workaround but turns MWI off all the times so never uses
10130          * it.  This seems to suggest that the workaround is insufficient.
10131          */
10132         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10133         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10134         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10135
10136         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10137          * has the register indirect write enable bit set before
10138          * we try to access any of the MMIO registers.  It is also
10139          * critical that the PCI-X hw workaround situation is decided
10140          * before that as well.
10141          */
10142         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10143                               &misc_ctrl_reg);
10144
10145         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10146                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10147
10148         /* Wrong chip ID in 5752 A0. This code can be removed later
10149          * as A0 is not in production.
10150          */
10151         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10152                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10153
10154         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10155          * we need to disable memory and use config. cycles
10156          * only to access all registers. The 5702/03 chips
10157          * can mistakenly decode the special cycles from the
10158          * ICH chipsets as memory write cycles, causing corruption
10159          * of register and memory space. Only certain ICH bridges
10160          * will drive special cycles with non-zero data during the
10161          * address phase which can fall within the 5703's address
10162          * range. This is not an ICH bug as the PCI spec allows
10163          * non-zero address during special cycles. However, only
10164          * these ICH bridges are known to drive non-zero addresses
10165          * during special cycles.
10166          *
10167          * Since special cycles do not cross PCI bridges, we only
10168          * enable this workaround if the 5703 is on the secondary
10169          * bus of these ICH bridges.
10170          */
10171         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10172             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10173                 static struct tg3_dev_id {
10174                         u32     vendor;
10175                         u32     device;
10176                         u32     rev;
10177                 } ich_chipsets[] = {
10178                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10179                           PCI_ANY_ID },
10180                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10181                           PCI_ANY_ID },
10182                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10183                           0xa },
10184                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10185                           PCI_ANY_ID },
10186                         { },
10187                 };
10188                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10189                 struct pci_dev *bridge = NULL;
10190
10191                 while (pci_id->vendor != 0) {
10192                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10193                                                 bridge);
10194                         if (!bridge) {
10195                                 pci_id++;
10196                                 continue;
10197                         }
10198                         if (pci_id->rev != PCI_ANY_ID) {
10199                                 u8 rev;
10200
10201                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10202                                                      &rev);
10203                                 if (rev > pci_id->rev)
10204                                         continue;
10205                         }
10206                         if (bridge->subordinate &&
10207                             (bridge->subordinate->number ==
10208                              tp->pdev->bus->number)) {
10209
10210                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10211                                 pci_dev_put(bridge);
10212                                 break;
10213                         }
10214                 }
10215         }
10216
10217         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10218          * DMA addresses > 40-bit. This bridge may have other additional
10219          * 57xx devices behind it in some 4-port NIC designs for example.
10220          * Any tg3 device found behind the bridge will also need the 40-bit
10221          * DMA workaround.
10222          */
10223         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10224             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10225                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10226                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10227                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10228         }
10229         else {
10230                 struct pci_dev *bridge = NULL;
10231
10232                 do {
10233                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10234                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10235                                                 bridge);
10236                         if (bridge && bridge->subordinate &&
10237                             (bridge->subordinate->number <=
10238                              tp->pdev->bus->number) &&
10239                             (bridge->subordinate->subordinate >=
10240                              tp->pdev->bus->number)) {
10241                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10242                                 pci_dev_put(bridge);
10243                                 break;
10244                         }
10245                 } while (bridge);
10246         }
10247
10248         /* Initialize misc host control in PCI block. */
10249         tp->misc_host_ctrl |= (misc_ctrl_reg &
10250                                MISC_HOST_CTRL_CHIPREV);
10251         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10252                                tp->misc_host_ctrl);
10253
10254         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10255                               &cacheline_sz_reg);
10256
10257         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10258         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10259         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10260         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10261
10262         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10263             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10264             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10265             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10266             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10267                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10268
10269         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10270             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10271                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10272
10273         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10274                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10275                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10276                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10277                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10278                 } else {
10279                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10280                                           TG3_FLG2_HW_TSO_1_BUG;
10281                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10282                                 ASIC_REV_5750 &&
10283                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10284                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10285                 }
10286         }
10287
10288         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10289             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10290             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10291             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10292             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10293                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10294
10295         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10296                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10297
10298         /* If we have an AMD 762 or VIA K8T800 chipset, write
10299          * reordering to the mailbox registers done by the host
10300          * controller can cause major troubles.  We read back from
10301          * every mailbox register write to force the writes to be
10302          * posted to the chip in order.
10303          */
10304         if (pci_dev_present(write_reorder_chipsets) &&
10305             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10306                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10307
10308         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10309             tp->pci_lat_timer < 64) {
10310                 tp->pci_lat_timer = 64;
10311
10312                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10313                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10314                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10315                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10316
10317                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10318                                        cacheline_sz_reg);
10319         }
10320
10321         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10322                               &pci_state_reg);
10323
10324         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10325                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10326
10327                 /* If this is a 5700 BX chipset, and we are in PCI-X
10328                  * mode, enable register write workaround.
10329                  *
10330                  * The workaround is to use indirect register accesses
10331                  * for all chip writes not to mailbox registers.
10332                  */
10333                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10334                         u32 pm_reg;
10335                         u16 pci_cmd;
10336
10337                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10338
10339                         /* The chip can have it's power management PCI config
10340                          * space registers clobbered due to this bug.
10341                          * So explicitly force the chip into D0 here.
10342                          */
10343                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10344                                               &pm_reg);
10345                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10346                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10347                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10348                                                pm_reg);
10349
10350                         /* Also, force SERR#/PERR# in PCI command. */
10351                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10352                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10353                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10354                 }
10355         }
10356
10357         /* 5700 BX chips need to have their TX producer index mailboxes
10358          * written twice to workaround a bug.
10359          */
10360         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10361                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10362
10363         /* Back to back register writes can cause problems on this chip,
10364          * the workaround is to read back all reg writes except those to
10365          * mailbox regs.  See tg3_write_indirect_reg32().
10366          *
10367          * PCI Express 5750_A0 rev chips need this workaround too.
10368          */
10369         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10370             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10371              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10372                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10373
10374         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10375                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10376         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10377                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10378
10379         /* Chip-specific fixup from Broadcom driver */
10380         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10381             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10382                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10383                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10384         }
10385
10386         /* Default fast path register access methods */
10387         tp->read32 = tg3_read32;
10388         tp->write32 = tg3_write32;
10389         tp->read32_mbox = tg3_read32;
10390         tp->write32_mbox = tg3_write32;
10391         tp->write32_tx_mbox = tg3_write32;
10392         tp->write32_rx_mbox = tg3_write32;
10393
10394         /* Various workaround register access methods */
10395         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10396                 tp->write32 = tg3_write_indirect_reg32;
10397         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10398                 tp->write32 = tg3_write_flush_reg32;
10399
10400         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10401             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10402                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10403                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10404                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10405         }
10406
10407         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10408                 tp->read32 = tg3_read_indirect_reg32;
10409                 tp->write32 = tg3_write_indirect_reg32;
10410                 tp->read32_mbox = tg3_read_indirect_mbox;
10411                 tp->write32_mbox = tg3_write_indirect_mbox;
10412                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10413                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10414
10415                 iounmap(tp->regs);
10416                 tp->regs = NULL;
10417
10418                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10419                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10420                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10421         }
10422
10423         if (tp->write32 == tg3_write_indirect_reg32 ||
10424             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10425              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10426               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10427                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10428
10429         /* Get eeprom hw config before calling tg3_set_power_state().
10430          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10431          * determined before calling tg3_set_power_state() so that
10432          * we know whether or not to switch out of Vaux power.
10433          * When the flag is set, it means that GPIO1 is used for eeprom
10434          * write protect and also implies that it is a LOM where GPIOs
10435          * are not used to switch power.
10436          */ 
10437         tg3_get_eeprom_hw_cfg(tp);
10438
10439         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10440          * GPIO1 driven high will bring 5700's external PHY out of reset.
10441          * It is also used as eeprom write protect on LOMs.
10442          */
10443         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10444         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10445             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10446                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10447                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10448         /* Unused GPIO3 must be driven as output on 5752 because there
10449          * are no pull-up resistors on unused GPIO pins.
10450          */
10451         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10452                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10453
10454         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10455                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10456
10457         /* Force the chip into D0. */
10458         err = tg3_set_power_state(tp, PCI_D0);
10459         if (err) {
10460                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10461                        pci_name(tp->pdev));
10462                 return err;
10463         }
10464
10465         /* 5700 B0 chips do not support checksumming correctly due
10466          * to hardware bugs.
10467          */
10468         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10469                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10470
10471         /* Derive initial jumbo mode from MTU assigned in
10472          * ether_setup() via the alloc_etherdev() call
10473          */
10474         if (tp->dev->mtu > ETH_DATA_LEN &&
10475             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10476                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10477
10478         /* Determine WakeOnLan speed to use. */
10479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10480             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10481             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10482             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10483                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10484         } else {
10485                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10486         }
10487
10488         /* A few boards don't want Ethernet@WireSpeed phy feature */
10489         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10490             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10491              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10492              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10493             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10494                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10495
10496         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10497             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10498                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10499         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10500                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10501
10502         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10503                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10504                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10505                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10506                 else
10507                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10508         }
10509
10510         tp->coalesce_mode = 0;
10511         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10512             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10513                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10514
10515         /* Initialize MAC MI mode, polling disabled. */
10516         tw32_f(MAC_MI_MODE, tp->mi_mode);
10517         udelay(80);
10518
10519         /* Initialize data/descriptor byte/word swapping. */
10520         val = tr32(GRC_MODE);
10521         val &= GRC_MODE_HOST_STACKUP;
10522         tw32(GRC_MODE, val | tp->grc_mode);
10523
10524         tg3_switch_clocks(tp);
10525
10526         /* Clear this out for sanity. */
10527         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10528
10529         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10530                               &pci_state_reg);
10531         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10532             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10533                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10534
10535                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10536                     chiprevid == CHIPREV_ID_5701_B0 ||
10537                     chiprevid == CHIPREV_ID_5701_B2 ||
10538                     chiprevid == CHIPREV_ID_5701_B5) {
10539                         void __iomem *sram_base;
10540
10541                         /* Write some dummy words into the SRAM status block
10542                          * area, see if it reads back correctly.  If the return
10543                          * value is bad, force enable the PCIX workaround.
10544                          */
10545                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10546
10547                         writel(0x00000000, sram_base);
10548                         writel(0x00000000, sram_base + 4);
10549                         writel(0xffffffff, sram_base + 4);
10550                         if (readl(sram_base) != 0x00000000)
10551                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10552                 }
10553         }
10554
10555         udelay(50);
10556         tg3_nvram_init(tp);
10557
10558         grc_misc_cfg = tr32(GRC_MISC_CFG);
10559         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10560
10561         /* Broadcom's driver says that CIOBE multisplit has a bug */
10562 #if 0
10563         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10564             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10565                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10566                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10567         }
10568 #endif
10569         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10570             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10571              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10572                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10573
10574         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10575             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10576                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10577         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10578                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10579                                       HOSTCC_MODE_CLRTICK_TXBD);
10580
10581                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10582                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10583                                        tp->misc_host_ctrl);
10584         }
10585
10586         /* these are limited to 10/100 only */
10587         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10588              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10589             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10590              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10591              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10592               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10593               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10594             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10595              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10596               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10597                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10598
10599         err = tg3_phy_probe(tp);
10600         if (err) {
10601                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10602                        pci_name(tp->pdev), err);
10603                 /* ... but do not return immediately ... */
10604         }
10605
10606         tg3_read_partno(tp);
10607         tg3_read_fw_ver(tp);
10608
10609         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10610                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10611         } else {
10612                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10613                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10614                 else
10615                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10616         }
10617
10618         /* 5700 {AX,BX} chips have a broken status block link
10619          * change bit implementation, so we must use the
10620          * status register in those cases.
10621          */
10622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10623                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10624         else
10625                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10626
10627         /* The led_ctrl is set during tg3_phy_probe, here we might
10628          * have to force the link status polling mechanism based
10629          * upon subsystem IDs.
10630          */
10631         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10632             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10633                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10634                                   TG3_FLAG_USE_LINKCHG_REG);
10635         }
10636
10637         /* For all SERDES we poll the MAC status register. */
10638         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10639                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10640         else
10641                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10642
10643         /* All chips before 5787 can get confused if TX buffers
10644          * straddle the 4GB address boundary in some cases.
10645          */
10646         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10647             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10648                 tp->dev->hard_start_xmit = tg3_start_xmit;
10649         else
10650                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10651
10652         tp->rx_offset = 2;
10653         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10654             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10655                 tp->rx_offset = 0;
10656
10657         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10658
10659         /* Increment the rx prod index on the rx std ring by at most
10660          * 8 for these chips to workaround hw errata.
10661          */
10662         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10663             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10664             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10665                 tp->rx_std_max_post = 8;
10666
10667         /* By default, disable wake-on-lan.  User can change this
10668          * using ETHTOOL_SWOL.
10669          */
10670         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10671
10672         return err;
10673 }
10674
10675 #ifdef CONFIG_SPARC64
10676 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10677 {
10678         struct net_device *dev = tp->dev;
10679         struct pci_dev *pdev = tp->pdev;
10680         struct pcidev_cookie *pcp = pdev->sysdata;
10681
10682         if (pcp != NULL) {
10683                 unsigned char *addr;
10684                 int len;
10685
10686                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10687                                         &len);
10688                 if (addr && len == 6) {
10689                         memcpy(dev->dev_addr, addr, 6);
10690                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10691                         return 0;
10692                 }
10693         }
10694         return -ENODEV;
10695 }
10696
10697 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10698 {
10699         struct net_device *dev = tp->dev;
10700
10701         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10702         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10703         return 0;
10704 }
10705 #endif
10706
10707 static int __devinit tg3_get_device_address(struct tg3 *tp)
10708 {
10709         struct net_device *dev = tp->dev;
10710         u32 hi, lo, mac_offset;
10711         int addr_ok = 0;
10712
10713 #ifdef CONFIG_SPARC64
10714         if (!tg3_get_macaddr_sparc(tp))
10715                 return 0;
10716 #endif
10717
10718         mac_offset = 0x7c;
10719         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10720             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10721                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10722                         mac_offset = 0xcc;
10723                 if (tg3_nvram_lock(tp))
10724                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10725                 else
10726                         tg3_nvram_unlock(tp);
10727         }
10728
10729         /* First try to get it from MAC address mailbox. */
10730         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10731         if ((hi >> 16) == 0x484b) {
10732                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10733                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10734
10735                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10736                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10737                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10738                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10739                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10740
10741                 /* Some old bootcode may report a 0 MAC address in SRAM */
10742                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10743         }
10744         if (!addr_ok) {
10745                 /* Next, try NVRAM. */
10746                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10747                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10748                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10749                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10750                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10751                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10752                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10753                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10754                 }
10755                 /* Finally just fetch it out of the MAC control regs. */
10756                 else {
10757                         hi = tr32(MAC_ADDR_0_HIGH);
10758                         lo = tr32(MAC_ADDR_0_LOW);
10759
10760                         dev->dev_addr[5] = lo & 0xff;
10761                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10762                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10763                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10764                         dev->dev_addr[1] = hi & 0xff;
10765                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10766                 }
10767         }
10768
10769         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10770 #ifdef CONFIG_SPARC64
10771                 if (!tg3_get_default_macaddr_sparc(tp))
10772                         return 0;
10773 #endif
10774                 return -EINVAL;
10775         }
10776         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10777         return 0;
10778 }
10779
10780 #define BOUNDARY_SINGLE_CACHELINE       1
10781 #define BOUNDARY_MULTI_CACHELINE        2
10782
10783 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10784 {
10785         int cacheline_size;
10786         u8 byte;
10787         int goal;
10788
10789         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10790         if (byte == 0)
10791                 cacheline_size = 1024;
10792         else
10793                 cacheline_size = (int) byte * 4;
10794
10795         /* On 5703 and later chips, the boundary bits have no
10796          * effect.
10797          */
10798         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10799             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10800             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10801                 goto out;
10802
10803 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10804         goal = BOUNDARY_MULTI_CACHELINE;
10805 #else
10806 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10807         goal = BOUNDARY_SINGLE_CACHELINE;
10808 #else
10809         goal = 0;
10810 #endif
10811 #endif
10812
10813         if (!goal)
10814                 goto out;
10815
10816         /* PCI controllers on most RISC systems tend to disconnect
10817          * when a device tries to burst across a cache-line boundary.
10818          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10819          *
10820          * Unfortunately, for PCI-E there are only limited
10821          * write-side controls for this, and thus for reads
10822          * we will still get the disconnects.  We'll also waste
10823          * these PCI cycles for both read and write for chips
10824          * other than 5700 and 5701 which do not implement the
10825          * boundary bits.
10826          */
10827         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10828             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10829                 switch (cacheline_size) {
10830                 case 16:
10831                 case 32:
10832                 case 64:
10833                 case 128:
10834                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10835                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10836                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10837                         } else {
10838                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10839                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10840                         }
10841                         break;
10842
10843                 case 256:
10844                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10845                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10846                         break;
10847
10848                 default:
10849                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10850                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10851                         break;
10852                 };
10853         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10854                 switch (cacheline_size) {
10855                 case 16:
10856                 case 32:
10857                 case 64:
10858                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10859                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10860                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10861                                 break;
10862                         }
10863                         /* fallthrough */
10864                 case 128:
10865                 default:
10866                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10867                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10868                         break;
10869                 };
10870         } else {
10871                 switch (cacheline_size) {
10872                 case 16:
10873                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10874                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10875                                         DMA_RWCTRL_WRITE_BNDRY_16);
10876                                 break;
10877                         }
10878                         /* fallthrough */
10879                 case 32:
10880                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10881                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10882                                         DMA_RWCTRL_WRITE_BNDRY_32);
10883                                 break;
10884                         }
10885                         /* fallthrough */
10886                 case 64:
10887                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10888                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10889                                         DMA_RWCTRL_WRITE_BNDRY_64);
10890                                 break;
10891                         }
10892                         /* fallthrough */
10893                 case 128:
10894                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10895                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10896                                         DMA_RWCTRL_WRITE_BNDRY_128);
10897                                 break;
10898                         }
10899                         /* fallthrough */
10900                 case 256:
10901                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10902                                 DMA_RWCTRL_WRITE_BNDRY_256);
10903                         break;
10904                 case 512:
10905                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10906                                 DMA_RWCTRL_WRITE_BNDRY_512);
10907                         break;
10908                 case 1024:
10909                 default:
10910                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10911                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10912                         break;
10913                 };
10914         }
10915
10916 out:
10917         return val;
10918 }
10919
/* Run a single DMA test transaction through the chip's internal DMA
 * engines: DMA a buffer of @size bytes at @buf_dma to the device
 * (@to_device != 0, read DMA engine) or from the device (@to_device == 0,
 * write DMA engine), then poll the matching completion FIFO.
 * Returns 0 if the descriptor completed within ~4ms, -ENODEV otherwise.
 * The descriptor is placed into NIC SRAM via the PCI config-space memory
 * window, so this works even before tp->regs-based access is trusted.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Clear completion FIFOs and DMA engine status before the test. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Build the internal buffer descriptor pointing at the host
         * buffer.  nic_mbuf/cqid_sqid/flags values come from Broadcom;
         * see the note below on why no engine reset is done here.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor into NIC SRAM one 32-bit word at a time,
         * using the PCI config-space memory window registers.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        /* Close the memory window for sanity. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick off the transfer by enqueueing the descriptor address on
         * the appropriate DMA FIFO.
         */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll the completion FIFO (up to 40 * 100us) for our descriptor. */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
11000
11001 #define TEST_BUFFER_SIZE        0x2000
11002
/*
 * tg3_test_dma - choose and validate the chip's DMA read/write control
 * settings (tp->dma_rwctrl).
 *
 * Picks per-ASIC DMA watermarks and boundary settings, then on 5700/5701
 * performs a write/read loopback of a TEST_BUFFER_SIZE buffer through
 * chip SRAM (via tg3_do_test_dma) to detect the 5700/5701 write-DMA bug,
 * tightening the write boundary to 16 bytes if corruption is seen.
 *
 * Returns 0 on success, -ENOMEM if the DMA buffer cannot be allocated,
 * or -ENODEV if the loopback fails even with the workaround boundary.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command values. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Per-bus / per-ASIC watermark settings.  The magic constants are
	 * Broadcom-specified register field values for each chip family.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (minimum DMA size field). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the loopback verification below. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write pattern out, read it back, verify.  On corruption,
	 * drop to the 16-byte write boundary once and retry; a second
	 * corruption is fatal.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: apply the 16-byte
				 * boundary workaround and retry. */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11183
11184 static void __devinit tg3_init_link_config(struct tg3 *tp)
11185 {
11186         tp->link_config.advertising =
11187                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11188                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11189                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11190                  ADVERTISED_Autoneg | ADVERTISED_MII);
11191         tp->link_config.speed = SPEED_INVALID;
11192         tp->link_config.duplex = DUPLEX_INVALID;
11193         tp->link_config.autoneg = AUTONEG_ENABLE;
11194         tp->link_config.active_speed = SPEED_INVALID;
11195         tp->link_config.active_duplex = DUPLEX_INVALID;
11196         tp->link_config.phy_is_low_power = 0;
11197         tp->link_config.orig_speed = SPEED_INVALID;
11198         tp->link_config.orig_duplex = DUPLEX_INVALID;
11199         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11200 }
11201
11202 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11203 {
11204         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11205                 tp->bufmgr_config.mbuf_read_dma_low_water =
11206                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11207                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11208                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11209                 tp->bufmgr_config.mbuf_high_water =
11210                         DEFAULT_MB_HIGH_WATER_5705;
11211
11212                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11213                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11214                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11215                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11216                 tp->bufmgr_config.mbuf_high_water_jumbo =
11217                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11218         } else {
11219                 tp->bufmgr_config.mbuf_read_dma_low_water =
11220                         DEFAULT_MB_RDMA_LOW_WATER;
11221                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11222                         DEFAULT_MB_MACRX_LOW_WATER;
11223                 tp->bufmgr_config.mbuf_high_water =
11224                         DEFAULT_MB_HIGH_WATER;
11225
11226                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11227                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11228                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11229                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11230                 tp->bufmgr_config.mbuf_high_water_jumbo =
11231                         DEFAULT_MB_HIGH_WATER_JUMBO;
11232         }
11233
11234         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11235         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11236 }
11237
11238 static char * __devinit tg3_phy_string(struct tg3 *tp)
11239 {
11240         switch (tp->phy_id & PHY_ID_MASK) {
11241         case PHY_ID_BCM5400:    return "5400";
11242         case PHY_ID_BCM5401:    return "5401";
11243         case PHY_ID_BCM5411:    return "5411";
11244         case PHY_ID_BCM5701:    return "5701";
11245         case PHY_ID_BCM5703:    return "5703";
11246         case PHY_ID_BCM5704:    return "5704";
11247         case PHY_ID_BCM5705:    return "5705";
11248         case PHY_ID_BCM5750:    return "5750";
11249         case PHY_ID_BCM5752:    return "5752";
11250         case PHY_ID_BCM5714:    return "5714";
11251         case PHY_ID_BCM5780:    return "5780";
11252         case PHY_ID_BCM5755:    return "5755";
11253         case PHY_ID_BCM5787:    return "5787";
11254         case PHY_ID_BCM8002:    return "8002/serdes";
11255         case 0:                 return "serdes";
11256         default:                return "unknown";
11257         };
11258 }
11259
11260 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11261 {
11262         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11263                 strcpy(str, "PCI Express");
11264                 return str;
11265         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11266                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11267
11268                 strcpy(str, "PCIX:");
11269
11270                 if ((clock_ctrl == 7) ||
11271                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11272                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11273                         strcat(str, "133MHz");
11274                 else if (clock_ctrl == 0)
11275                         strcat(str, "33MHz");
11276                 else if (clock_ctrl == 2)
11277                         strcat(str, "50MHz");
11278                 else if (clock_ctrl == 4)
11279                         strcat(str, "66MHz");
11280                 else if (clock_ctrl == 6)
11281                         strcat(str, "100MHz");
11282         } else {
11283                 strcpy(str, "PCI:");
11284                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11285                         strcat(str, "66MHz");
11286                 else
11287                         strcat(str, "33MHz");
11288         }
11289         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11290                 strcat(str, ":32-bit");
11291         else
11292                 strcat(str, ":64-bit");
11293         return str;
11294 }
11295
/*
 * tg3_find_peer - locate the sibling PCI function of a dual-port chip
 * (same slot, different function number).  Falls back to tp->pdev itself
 * when no peer exists (e.g. a 5704 configured single-port).
 *
 * The returned pci_dev is deliberately NOT reference-counted; see the
 * comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Scan all eight functions in our slot; pci_get_slot() takes a
	 * reference, which is dropped for every non-match (pci_dev_put(NULL)
	 * is a no-op).
	 *
	 * NOTE(review): if the loop exhausts with a non-NULL func-7 device
	 * (e.g. tp->pdev itself at function 7), that reference was already
	 * dropped in the loop yet is dropped once more below -- presumably
	 * unreachable with real hardware layouts, but worth confirming.
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
11323
11324 static void __devinit tg3_init_coal(struct tg3 *tp)
11325 {
11326         struct ethtool_coalesce *ec = &tp->coal;
11327
11328         memset(ec, 0, sizeof(*ec));
11329         ec->cmd = ETHTOOL_GCOALESCE;
11330         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11331         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11332         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11333         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11334         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11335         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11336         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11337         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11338         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11339
11340         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11341                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11342                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11343                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11344                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11345                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11346         }
11347
11348         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11349                 ec->rx_coalesce_usecs_irq = 0;
11350                 ec->tx_coalesce_usecs_irq = 0;
11351                 ec->stats_block_coalesce_usecs = 0;
11352         }
11353 }
11354
/*
 * tg3_init_one - PCI probe routine.
 *
 * Enables and maps the device, allocates and fills in the net_device /
 * tg3 private state, determines chip invariants and DMA masks, resets a
 * chip left running by firmware (UNDI/EFI), validates DMA settings, and
 * finally registers the netdev and prints the probe banner.
 *
 * Returns 0 on success or a negative errno; all acquired resources are
 * released on the error paths via the goto ladder at the bottom.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver version banner only on the first probe. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Initialize the private state with driver defaults. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->tx_lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Hook up the net_device operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Firmware TSO is disabled for chip revisions where it is known
	 * to misbehave or conflict with ASF. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
			dev->features |= NETIF_F_TSO6;
	}

#endif

	/* 5705 A1 on a slow bus without TSO: shrink the RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port chips need a pointer to their sibling function. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Probe banner: board, PHY, bus, MAC address and feature flags. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	netif_carrier_off(tp->dev);

	return 0;

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11682
11683 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11684 {
11685         struct net_device *dev = pci_get_drvdata(pdev);
11686
11687         if (dev) {
11688                 struct tg3 *tp = netdev_priv(dev);
11689
11690                 flush_scheduled_work();
11691                 unregister_netdev(dev);
11692                 if (tp->regs) {
11693                         iounmap(tp->regs);
11694                         tp->regs = NULL;
11695                 }
11696                 free_netdev(dev);
11697                 pci_release_regions(pdev);
11698                 pci_disable_device(pdev);
11699                 pci_set_drvdata(pdev, NULL);
11700         }
11701 }
11702
/* PM suspend callback: quiesce the interface, halt the chip, and drop
 * it into the PCI power state chosen for @state.
 *
 * If the power-state transition fails, the device is brought all the
 * way back up (restart hardware, rearm the poll timer, reattach the
 * netdev) so the interface keeps working; the original error is still
 * returned to the PM core.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* irq_sync=1: wait out any in-flight interrupt handler before
	 * masking interrupts. */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: restore the device to a
		 * running state rather than leaving it halted. */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		/* If even the restart fails, leave the netdev detached
		 * and the timer stopped — the device is unusable. */
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
11748
/* PM resume callback: restore PCI config space, return the chip to D0,
 * and re-run full hardware initialization before restarting the queues
 * and the poll timer.  Returns 0 or a negative error from the power
 * transition / hardware restart.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was down at suspend time; nothing to bring back. */
	if (!netif_running(dev))
		return 0;

	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	/* On restart failure, skip rearming the timer and starting the
	 * queues; the error is propagated to the PM core. */
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
11783
/* PCI driver glue: binds the Tigon3 device ID table to the probe,
 * removal, and power-management entry points above. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
11792
11793 static int __init tg3_init(void)
11794 {
11795         return pci_module_init(&tg3_driver);
11796 }
11797
/* Module exit point: unregister the driver; the PCI core then calls
 * tg3_remove_one() for every bound device. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
11802
/* Hook the init/exit routines into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);