]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/net/tg3.c
73e271e59c6a20c4401c3aaef08fb98642787286
[net-next-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.56"
73 #define DRV_MODULE_RELDATE      "Apr 1, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
261           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
263           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
264         { 0, }
265 };
266
267 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
268
269 static struct {
270         const char string[ETH_GSTRING_LEN];
271 } ethtool_stats_keys[TG3_NUM_STATS] = {
272         { "rx_octets" },
273         { "rx_fragments" },
274         { "rx_ucast_packets" },
275         { "rx_mcast_packets" },
276         { "rx_bcast_packets" },
277         { "rx_fcs_errors" },
278         { "rx_align_errors" },
279         { "rx_xon_pause_rcvd" },
280         { "rx_xoff_pause_rcvd" },
281         { "rx_mac_ctrl_rcvd" },
282         { "rx_xoff_entered" },
283         { "rx_frame_too_long_errors" },
284         { "rx_jabbers" },
285         { "rx_undersize_packets" },
286         { "rx_in_length_errors" },
287         { "rx_out_length_errors" },
288         { "rx_64_or_less_octet_packets" },
289         { "rx_65_to_127_octet_packets" },
290         { "rx_128_to_255_octet_packets" },
291         { "rx_256_to_511_octet_packets" },
292         { "rx_512_to_1023_octet_packets" },
293         { "rx_1024_to_1522_octet_packets" },
294         { "rx_1523_to_2047_octet_packets" },
295         { "rx_2048_to_4095_octet_packets" },
296         { "rx_4096_to_8191_octet_packets" },
297         { "rx_8192_to_9022_octet_packets" },
298
299         { "tx_octets" },
300         { "tx_collisions" },
301
302         { "tx_xon_sent" },
303         { "tx_xoff_sent" },
304         { "tx_flow_control" },
305         { "tx_mac_errors" },
306         { "tx_single_collisions" },
307         { "tx_mult_collisions" },
308         { "tx_deferred" },
309         { "tx_excessive_collisions" },
310         { "tx_late_collisions" },
311         { "tx_collide_2times" },
312         { "tx_collide_3times" },
313         { "tx_collide_4times" },
314         { "tx_collide_5times" },
315         { "tx_collide_6times" },
316         { "tx_collide_7times" },
317         { "tx_collide_8times" },
318         { "tx_collide_9times" },
319         { "tx_collide_10times" },
320         { "tx_collide_11times" },
321         { "tx_collide_12times" },
322         { "tx_collide_13times" },
323         { "tx_collide_14times" },
324         { "tx_collide_15times" },
325         { "tx_ucast_packets" },
326         { "tx_mcast_packets" },
327         { "tx_bcast_packets" },
328         { "tx_carrier_sense_errors" },
329         { "tx_discards" },
330         { "tx_errors" },
331
332         { "dma_writeq_full" },
333         { "dma_write_prioq_full" },
334         { "rxbds_empty" },
335         { "rx_discards" },
336         { "rx_errors" },
337         { "rx_threshold_hit" },
338
339         { "dma_readq_full" },
340         { "dma_read_prioq_full" },
341         { "tx_comp_queue_full" },
342
343         { "ring_set_send_prod_index" },
344         { "ring_status_update" },
345         { "nic_irqs" },
346         { "nic_avoided_irqs" },
347         { "nic_tx_threshold_hit" }
348 };
349
350 static struct {
351         const char string[ETH_GSTRING_LEN];
352 } ethtool_test_keys[TG3_NUM_TEST] = {
353         { "nvram test     (online) " },
354         { "link test      (online) " },
355         { "register test  (offline)" },
356         { "memory test    (offline)" },
357         { "loopback test  (offline)" },
358         { "interrupt test (offline)" },
359 };
360
361 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
362 {
363         writel(val, tp->regs + off);
364 }
365
366 static u32 tg3_read32(struct tg3 *tp, u32 off)
367 {
368         return (readl(tp->regs + off)); 
369 }
370
371 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
372 {
373         unsigned long flags;
374
375         spin_lock_irqsave(&tp->indirect_lock, flags);
376         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
377         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
378         spin_unlock_irqrestore(&tp->indirect_lock, flags);
379 }
380
381 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
382 {
383         writel(val, tp->regs + off);
384         readl(tp->regs + off);
385 }
386
387 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
388 {
389         unsigned long flags;
390         u32 val;
391
392         spin_lock_irqsave(&tp->indirect_lock, flags);
393         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
394         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
395         spin_unlock_irqrestore(&tp->indirect_lock, flags);
396         return val;
397 }
398
399 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
400 {
401         unsigned long flags;
402
403         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
404                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
405                                        TG3_64BIT_REG_LOW, val);
406                 return;
407         }
408         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
409                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
410                                        TG3_64BIT_REG_LOW, val);
411                 return;
412         }
413
414         spin_lock_irqsave(&tp->indirect_lock, flags);
415         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
416         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
417         spin_unlock_irqrestore(&tp->indirect_lock, flags);
418
419         /* In indirect mode when disabling interrupts, we also need
420          * to clear the interrupt bit in the GRC local ctrl register.
421          */
422         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
423             (val == 0x1)) {
424                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
425                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
426         }
427 }
428
429 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
430 {
431         unsigned long flags;
432         u32 val;
433
434         spin_lock_irqsave(&tp->indirect_lock, flags);
435         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
436         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
437         spin_unlock_irqrestore(&tp->indirect_lock, flags);
438         return val;
439 }
440
441 /* usec_wait specifies the wait time in usec when writing to certain registers
442  * where it is unsafe to read back the register without some delay.
443  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
444  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
445  */
446 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
447 {
448         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
449             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
450                 /* Non-posted methods */
451                 tp->write32(tp, off, val);
452         else {
453                 /* Posted method */
454                 tg3_write32(tp, off, val);
455                 if (usec_wait)
456                         udelay(usec_wait);
457                 tp->read32(tp, off);
458         }
459         /* Wait again after the read for the posted method to guarantee that
460          * the wait time is met.
461          */
462         if (usec_wait)
463                 udelay(usec_wait);
464 }
465
466 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
467 {
468         tp->write32_mbox(tp, off, val);
469         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
470             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
471                 tp->read32_mbox(tp, off);
472 }
473
474 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
475 {
476         void __iomem *mbox = tp->regs + off;
477         writel(val, mbox);
478         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
479                 writel(val, mbox);
480         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
481                 readl(mbox);
482 }
483
484 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
485 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
486 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
487 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
488 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
489
490 #define tw32(reg,val)           tp->write32(tp, reg, val)
491 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
492 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
493 #define tr32(reg)               tp->read32(tp, reg)
494
495 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
496 {
497         unsigned long flags;
498
499         spin_lock_irqsave(&tp->indirect_lock, flags);
500         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
501                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
502                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
503
504                 /* Always leave this as zero. */
505                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
506         } else {
507                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
508                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
509
510                 /* Always leave this as zero. */
511                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
512         }
513         spin_unlock_irqrestore(&tp->indirect_lock, flags);
514 }
515
516 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
517 {
518         unsigned long flags;
519
520         spin_lock_irqsave(&tp->indirect_lock, flags);
521         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
522                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
523                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
524
525                 /* Always leave this as zero. */
526                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
527         } else {
528                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
529                 *val = tr32(TG3PCI_MEM_WIN_DATA);
530
531                 /* Always leave this as zero. */
532                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
533         }
534         spin_unlock_irqrestore(&tp->indirect_lock, flags);
535 }
536
537 static void tg3_disable_ints(struct tg3 *tp)
538 {
539         tw32(TG3PCI_MISC_HOST_CTRL,
540              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
541         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
542 }
543
544 static inline void tg3_cond_int(struct tg3 *tp)
545 {
546         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
547             (tp->hw_status->status & SD_STATUS_UPDATED))
548                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
549 }
550
551 static void tg3_enable_ints(struct tg3 *tp)
552 {
553         tp->irq_sync = 0;
554         wmb();
555
556         tw32(TG3PCI_MISC_HOST_CTRL,
557              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
558         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
559                        (tp->last_tag << 24));
560         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
561                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
562                                (tp->last_tag << 24));
563         tg3_cond_int(tp);
564 }
565
566 static inline unsigned int tg3_has_work(struct tg3 *tp)
567 {
568         struct tg3_hw_status *sblk = tp->hw_status;
569         unsigned int work_exists = 0;
570
571         /* check for phy events */
572         if (!(tp->tg3_flags &
573               (TG3_FLAG_USE_LINKCHG_REG |
574                TG3_FLAG_POLL_SERDES))) {
575                 if (sblk->status & SD_STATUS_LINK_CHG)
576                         work_exists = 1;
577         }
578         /* check for RX/TX work to do */
579         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
580             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
581                 work_exists = 1;
582
583         return work_exists;
584 }
585
586 /* tg3_restart_ints
587  *  similar to tg3_enable_ints, but it accurately determines whether there
588  *  is new work pending and can return without flushing the PIO write
589  *  which reenables interrupts 
590  */
591 static void tg3_restart_ints(struct tg3 *tp)
592 {
593         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
594                      tp->last_tag << 24);
595         mmiowb();
596
597         /* When doing tagged status, this work check is unnecessary.
598          * The last_tag we write above tells the chip which piece of
599          * work we've completed.
600          */
601         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
602             tg3_has_work(tp))
603                 tw32(HOSTCC_MODE, tp->coalesce_mode |
604                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
605 }
606
607 static inline void tg3_netif_stop(struct tg3 *tp)
608 {
609         tp->dev->trans_start = jiffies; /* prevent tx timeout */
610         netif_poll_disable(tp->dev);
611         netif_tx_disable(tp->dev);
612 }
613
614 static inline void tg3_netif_start(struct tg3 *tp)
615 {
616         netif_wake_queue(tp->dev);
617         /* NOTE: unconditional netif_wake_queue is only appropriate
618          * so long as all callers are assured to have free tx slots
619          * (such as after tg3_init_hw)
620          */
621         netif_poll_enable(tp->dev);
622         tp->hw_status->status |= SD_STATUS_UPDATED;
623         tg3_enable_ints(tp);
624 }
625
626 static void tg3_switch_clocks(struct tg3 *tp)
627 {
628         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
629         u32 orig_clock_ctrl;
630
631         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
632                 return;
633
634         orig_clock_ctrl = clock_ctrl;
635         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
636                        CLOCK_CTRL_CLKRUN_OENABLE |
637                        0x1f);
638         tp->pci_clock_ctrl = clock_ctrl;
639
640         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
641                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
642                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
643                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
644                 }
645         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
646                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
647                             clock_ctrl |
648                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
649                             40);
650                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
651                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
652                             40);
653         }
654         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
655 }
656
657 #define PHY_BUSY_LOOPS  5000
658
659 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
660 {
661         u32 frame_val;
662         unsigned int loops;
663         int ret;
664
665         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
666                 tw32_f(MAC_MI_MODE,
667                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
668                 udelay(80);
669         }
670
671         *val = 0x0;
672
673         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
674                       MI_COM_PHY_ADDR_MASK);
675         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
676                       MI_COM_REG_ADDR_MASK);
677         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
678         
679         tw32_f(MAC_MI_COM, frame_val);
680
681         loops = PHY_BUSY_LOOPS;
682         while (loops != 0) {
683                 udelay(10);
684                 frame_val = tr32(MAC_MI_COM);
685
686                 if ((frame_val & MI_COM_BUSY) == 0) {
687                         udelay(5);
688                         frame_val = tr32(MAC_MI_COM);
689                         break;
690                 }
691                 loops -= 1;
692         }
693
694         ret = -EBUSY;
695         if (loops != 0) {
696                 *val = frame_val & MI_COM_DATA_MASK;
697                 ret = 0;
698         }
699
700         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
701                 tw32_f(MAC_MI_MODE, tp->mi_mode);
702                 udelay(80);
703         }
704
705         return ret;
706 }
707
708 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
709 {
710         u32 frame_val;
711         unsigned int loops;
712         int ret;
713
714         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
715                 tw32_f(MAC_MI_MODE,
716                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
717                 udelay(80);
718         }
719
720         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
721                       MI_COM_PHY_ADDR_MASK);
722         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
723                       MI_COM_REG_ADDR_MASK);
724         frame_val |= (val & MI_COM_DATA_MASK);
725         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
726         
727         tw32_f(MAC_MI_COM, frame_val);
728
729         loops = PHY_BUSY_LOOPS;
730         while (loops != 0) {
731                 udelay(10);
732                 frame_val = tr32(MAC_MI_COM);
733                 if ((frame_val & MI_COM_BUSY) == 0) {
734                         udelay(5);
735                         frame_val = tr32(MAC_MI_COM);
736                         break;
737                 }
738                 loops -= 1;
739         }
740
741         ret = -EBUSY;
742         if (loops != 0)
743                 ret = 0;
744
745         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
746                 tw32_f(MAC_MI_MODE, tp->mi_mode);
747                 udelay(80);
748         }
749
750         return ret;
751 }
752
753 static void tg3_phy_set_wirespeed(struct tg3 *tp)
754 {
755         u32 val;
756
757         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
758                 return;
759
760         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
761             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
762                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
763                              (val | (1 << 15) | (1 << 4)));
764 }
765
766 static int tg3_bmcr_reset(struct tg3 *tp)
767 {
768         u32 phy_control;
769         int limit, err;
770
771         /* OK, reset it, and poll the BMCR_RESET bit until it
772          * clears or we time out.
773          */
774         phy_control = BMCR_RESET;
775         err = tg3_writephy(tp, MII_BMCR, phy_control);
776         if (err != 0)
777                 return -EBUSY;
778
779         limit = 5000;
780         while (limit--) {
781                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
782                 if (err != 0)
783                         return -EBUSY;
784
785                 if ((phy_control & BMCR_RESET) == 0) {
786                         udelay(40);
787                         break;
788                 }
789                 udelay(10);
790         }
791         if (limit <= 0)
792                 return -EBUSY;
793
794         return 0;
795 }
796
797 static int tg3_wait_macro_done(struct tg3 *tp)
798 {
799         int limit = 100;
800
801         while (limit--) {
802                 u32 tmp32;
803
804                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
805                         if ((tmp32 & 0x1000) == 0)
806                                 break;
807                 }
808         }
809         if (limit <= 0)
810                 return -EBUSY;
811
812         return 0;
813 }
814
815 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
816 {
817         static const u32 test_pat[4][6] = {
818         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
819         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
820         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
821         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
822         };
823         int chan;
824
825         for (chan = 0; chan < 4; chan++) {
826                 int i;
827
828                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
829                              (chan * 0x2000) | 0x0200);
830                 tg3_writephy(tp, 0x16, 0x0002);
831
832                 for (i = 0; i < 6; i++)
833                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
834                                      test_pat[chan][i]);
835
836                 tg3_writephy(tp, 0x16, 0x0202);
837                 if (tg3_wait_macro_done(tp)) {
838                         *resetp = 1;
839                         return -EBUSY;
840                 }
841
842                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
843                              (chan * 0x2000) | 0x0200);
844                 tg3_writephy(tp, 0x16, 0x0082);
845                 if (tg3_wait_macro_done(tp)) {
846                         *resetp = 1;
847                         return -EBUSY;
848                 }
849
850                 tg3_writephy(tp, 0x16, 0x0802);
851                 if (tg3_wait_macro_done(tp)) {
852                         *resetp = 1;
853                         return -EBUSY;
854                 }
855
856                 for (i = 0; i < 6; i += 2) {
857                         u32 low, high;
858
859                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
860                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
861                             tg3_wait_macro_done(tp)) {
862                                 *resetp = 1;
863                                 return -EBUSY;
864                         }
865                         low &= 0x7fff;
866                         high &= 0x000f;
867                         if (low != test_pat[chan][i] ||
868                             high != test_pat[chan][i+1]) {
869                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
870                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
871                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
872
873                                 return -EBUSY;
874                         }
875                 }
876         }
877
878         return 0;
879 }
880
881 static int tg3_phy_reset_chanpat(struct tg3 *tp)
882 {
883         int chan;
884
885         for (chan = 0; chan < 4; chan++) {
886                 int i;
887
888                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
889                              (chan * 0x2000) | 0x0200);
890                 tg3_writephy(tp, 0x16, 0x0002);
891                 for (i = 0; i < 6; i++)
892                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
893                 tg3_writephy(tp, 0x16, 0x0202);
894                 if (tg3_wait_macro_done(tp))
895                         return -EBUSY;
896         }
897
898         return 0;
899 }
900
901 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
902 {
903         u32 reg32, phy9_orig;
904         int retries, do_phy_reset, err;
905
906         retries = 10;
907         do_phy_reset = 1;
908         do {
909                 if (do_phy_reset) {
910                         err = tg3_bmcr_reset(tp);
911                         if (err)
912                                 return err;
913                         do_phy_reset = 0;
914                 }
915
916                 /* Disable transmitter and interrupt.  */
917                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
918                         continue;
919
920                 reg32 |= 0x3000;
921                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
922
923                 /* Set full-duplex, 1000 mbps.  */
924                 tg3_writephy(tp, MII_BMCR,
925                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
926
927                 /* Set to master mode.  */
928                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
929                         continue;
930
931                 tg3_writephy(tp, MII_TG3_CTRL,
932                              (MII_TG3_CTRL_AS_MASTER |
933                               MII_TG3_CTRL_ENABLE_AS_MASTER));
934
935                 /* Enable SM_DSP_CLOCK and 6dB.  */
936                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
937
938                 /* Block the PHY control access.  */
939                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
940                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
941
942                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
943                 if (!err)
944                         break;
945         } while (--retries);
946
947         err = tg3_phy_reset_chanpat(tp);
948         if (err)
949                 return err;
950
951         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
952         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
953
954         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
955         tg3_writephy(tp, 0x16, 0x0000);
956
957         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
958             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
959                 /* Set Extended packet length bit for jumbo frames */
960                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
961         }
962         else {
963                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
964         }
965
966         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
967
968         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
969                 reg32 &= ~0x3000;
970                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
971         } else if (!err)
972                 err = -EBUSY;
973
974         return err;
975 }
976
977 /* This will reset the tigon3 PHY if there is no valid
978  * link unless the FORCE argument is non-zero.
979  */
980 static int tg3_phy_reset(struct tg3 *tp)
981 {
982         u32 phy_status;
983         int err;
984
985         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
986         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
987         if (err != 0)
988                 return -EBUSY;
989
990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
991             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
992             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
993                 err = tg3_phy_reset_5703_4_5(tp);
994                 if (err)
995                         return err;
996                 goto out;
997         }
998
999         err = tg3_bmcr_reset(tp);
1000         if (err)
1001                 return err;
1002
1003 out:
1004         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1005                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1006                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1007                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1008                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1009                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1010                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1011         }
1012         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1013                 tg3_writephy(tp, 0x1c, 0x8d68);
1014                 tg3_writephy(tp, 0x1c, 0x8d68);
1015         }
1016         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1017                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1018                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1019                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1020                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1021                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1022                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1023                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1024                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1025         }
1026         /* Set Extended packet length bit (bit 14) on all chips that */
1027         /* support jumbo frames */
1028         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1029                 /* Cannot do read-modify-write on 5401 */
1030                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1031         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1032                 u32 phy_reg;
1033
1034                 /* Set bit 14 with read-modify-write to preserve other bits */
1035                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1036                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1037                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1038         }
1039
1040         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1041          * jumbo frames transmission.
1042          */
1043         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1044                 u32 phy_reg;
1045
1046                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1047                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1048                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1049         }
1050
1051         tg3_phy_set_wirespeed(tp);
1052         return 0;
1053 }
1054
1055 static void tg3_frob_aux_power(struct tg3 *tp)
1056 {
1057         struct tg3 *tp_peer = tp;
1058
1059         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1060                 return;
1061
1062         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1063             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1064                 struct net_device *dev_peer;
1065
1066                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1067                 /* remove_one() may have been run on the peer. */
1068                 if (!dev_peer)
1069                         tp_peer = tp;
1070                 else
1071                         tp_peer = netdev_priv(dev_peer);
1072         }
1073
1074         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1075             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1076             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1077             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1078                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1079                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1080                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1081                                     (GRC_LCLCTRL_GPIO_OE0 |
1082                                      GRC_LCLCTRL_GPIO_OE1 |
1083                                      GRC_LCLCTRL_GPIO_OE2 |
1084                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1085                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1086                                     100);
1087                 } else {
1088                         u32 no_gpio2;
1089                         u32 grc_local_ctrl = 0;
1090
1091                         if (tp_peer != tp &&
1092                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1093                                 return;
1094
1095                         /* Workaround to prevent overdrawing Amps. */
1096                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1097                             ASIC_REV_5714) {
1098                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1099                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1100                                             grc_local_ctrl, 100);
1101                         }
1102
1103                         /* On 5753 and variants, GPIO2 cannot be used. */
1104                         no_gpio2 = tp->nic_sram_data_cfg &
1105                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1106
1107                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1108                                          GRC_LCLCTRL_GPIO_OE1 |
1109                                          GRC_LCLCTRL_GPIO_OE2 |
1110                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1111                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1112                         if (no_gpio2) {
1113                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1114                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1115                         }
1116                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1117                                                     grc_local_ctrl, 100);
1118
1119                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1120
1121                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1122                                                     grc_local_ctrl, 100);
1123
1124                         if (!no_gpio2) {
1125                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1126                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1127                                             grc_local_ctrl, 100);
1128                         }
1129                 }
1130         } else {
1131                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1132                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1133                         if (tp_peer != tp &&
1134                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1135                                 return;
1136
1137                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1138                                     (GRC_LCLCTRL_GPIO_OE1 |
1139                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1140
1141                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1142                                     GRC_LCLCTRL_GPIO_OE1, 100);
1143
1144                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1145                                     (GRC_LCLCTRL_GPIO_OE1 |
1146                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1147                 }
1148         }
1149 }
1150
1151 static int tg3_setup_phy(struct tg3 *, int);
1152
1153 #define RESET_KIND_SHUTDOWN     0
1154 #define RESET_KIND_INIT         1
1155 #define RESET_KIND_SUSPEND      2
1156
1157 static void tg3_write_sig_post_reset(struct tg3 *, int);
1158 static int tg3_halt_cpu(struct tg3 *, u32);
1159 static int tg3_nvram_lock(struct tg3 *);
1160 static void tg3_nvram_unlock(struct tg3 *);
1161
1162 static void tg3_power_down_phy(struct tg3 *tp)
1163 {
1164         /* The PHY should not be powered down on some chips because
1165          * of bugs.
1166          */
1167         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1168             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1169             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1170              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1171                 return;
1172         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1173 }
1174
1175 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1176 {
1177         u32 misc_host_ctrl;
1178         u16 power_control, power_caps;
1179         int pm = tp->pm_cap;
1180
1181         /* Make sure register accesses (indirect or otherwise)
1182          * will function correctly.
1183          */
1184         pci_write_config_dword(tp->pdev,
1185                                TG3PCI_MISC_HOST_CTRL,
1186                                tp->misc_host_ctrl);
1187
1188         pci_read_config_word(tp->pdev,
1189                              pm + PCI_PM_CTRL,
1190                              &power_control);
1191         power_control |= PCI_PM_CTRL_PME_STATUS;
1192         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1193         switch (state) {
1194         case PCI_D0:
1195                 power_control |= 0;
1196                 pci_write_config_word(tp->pdev,
1197                                       pm + PCI_PM_CTRL,
1198                                       power_control);
1199                 udelay(100);    /* Delay after power state change */
1200
1201                 /* Switch out of Vaux if it is not a LOM */
1202                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1203                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1204
1205                 return 0;
1206
1207         case PCI_D1:
1208                 power_control |= 1;
1209                 break;
1210
1211         case PCI_D2:
1212                 power_control |= 2;
1213                 break;
1214
1215         case PCI_D3hot:
1216                 power_control |= 3;
1217                 break;
1218
1219         default:
1220                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1221                        "requested.\n",
1222                        tp->dev->name, state);
1223                 return -EINVAL;
1224         };
1225
1226         power_control |= PCI_PM_CTRL_PME_ENABLE;
1227
1228         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1229         tw32(TG3PCI_MISC_HOST_CTRL,
1230              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1231
1232         if (tp->link_config.phy_is_low_power == 0) {
1233                 tp->link_config.phy_is_low_power = 1;
1234                 tp->link_config.orig_speed = tp->link_config.speed;
1235                 tp->link_config.orig_duplex = tp->link_config.duplex;
1236                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1237         }
1238
1239         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1240                 tp->link_config.speed = SPEED_10;
1241                 tp->link_config.duplex = DUPLEX_HALF;
1242                 tp->link_config.autoneg = AUTONEG_ENABLE;
1243                 tg3_setup_phy(tp, 0);
1244         }
1245
1246         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1247                 int i;
1248                 u32 val;
1249
1250                 for (i = 0; i < 200; i++) {
1251                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1252                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1253                                 break;
1254                         msleep(1);
1255                 }
1256         }
1257         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1258                                              WOL_DRV_STATE_SHUTDOWN |
1259                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1260
1261         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1262
1263         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1264                 u32 mac_mode;
1265
1266                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1267                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1268                         udelay(40);
1269
1270                         mac_mode = MAC_MODE_PORT_MODE_MII;
1271
1272                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1273                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1274                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1275                 } else {
1276                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1277                 }
1278
1279                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1280                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1281
1282                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1283                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1284                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1285
1286                 tw32_f(MAC_MODE, mac_mode);
1287                 udelay(100);
1288
1289                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1290                 udelay(10);
1291         }
1292
1293         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1294             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1295              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1296                 u32 base_val;
1297
1298                 base_val = tp->pci_clock_ctrl;
1299                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1300                              CLOCK_CTRL_TXCLK_DISABLE);
1301
1302                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1303                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1304         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1305                 /* do nothing */
1306         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1307                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1308                 u32 newbits1, newbits2;
1309
1310                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1311                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1312                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1313                                     CLOCK_CTRL_TXCLK_DISABLE |
1314                                     CLOCK_CTRL_ALTCLK);
1315                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1316                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1317                         newbits1 = CLOCK_CTRL_625_CORE;
1318                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1319                 } else {
1320                         newbits1 = CLOCK_CTRL_ALTCLK;
1321                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1322                 }
1323
1324                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1325                             40);
1326
1327                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1328                             40);
1329
1330                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1331                         u32 newbits3;
1332
1333                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1334                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1335                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1336                                             CLOCK_CTRL_TXCLK_DISABLE |
1337                                             CLOCK_CTRL_44MHZ_CORE);
1338                         } else {
1339                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1340                         }
1341
1342                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1343                                     tp->pci_clock_ctrl | newbits3, 40);
1344                 }
1345         }
1346
1347         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1348             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1349                 /* Turn off the PHY */
1350                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1351                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1352                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1353                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1354                         tg3_power_down_phy(tp);
1355                 }
1356         }
1357
1358         tg3_frob_aux_power(tp);
1359
1360         /* Workaround for unstable PLL clock */
1361         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1362             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1363                 u32 val = tr32(0x7d00);
1364
1365                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1366                 tw32(0x7d00, val);
1367                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1368                         int err;
1369
1370                         err = tg3_nvram_lock(tp);
1371                         tg3_halt_cpu(tp, RX_CPU_BASE);
1372                         if (!err)
1373                                 tg3_nvram_unlock(tp);
1374                 }
1375         }
1376
1377         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1378
1379         /* Finally, set the new power state. */
1380         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1381         udelay(100);    /* Delay after power state change */
1382
1383         return 0;
1384 }
1385
1386 static void tg3_link_report(struct tg3 *tp)
1387 {
1388         if (!netif_carrier_ok(tp->dev)) {
1389                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1390         } else {
1391                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1392                        tp->dev->name,
1393                        (tp->link_config.active_speed == SPEED_1000 ?
1394                         1000 :
1395                         (tp->link_config.active_speed == SPEED_100 ?
1396                          100 : 10)),
1397                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1398                         "full" : "half"));
1399
1400                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1401                        "%s for RX.\n",
1402                        tp->dev->name,
1403                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1404                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1405         }
1406 }
1407
1408 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1409 {
1410         u32 new_tg3_flags = 0;
1411         u32 old_rx_mode = tp->rx_mode;
1412         u32 old_tx_mode = tp->tx_mode;
1413
1414         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1415
1416                 /* Convert 1000BaseX flow control bits to 1000BaseT
1417                  * bits before resolving flow control.
1418                  */
1419                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1420                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1421                                        ADVERTISE_PAUSE_ASYM);
1422                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1423
1424                         if (local_adv & ADVERTISE_1000XPAUSE)
1425                                 local_adv |= ADVERTISE_PAUSE_CAP;
1426                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1427                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1428                         if (remote_adv & LPA_1000XPAUSE)
1429                                 remote_adv |= LPA_PAUSE_CAP;
1430                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1431                                 remote_adv |= LPA_PAUSE_ASYM;
1432                 }
1433
1434                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1435                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1436                                 if (remote_adv & LPA_PAUSE_CAP)
1437                                         new_tg3_flags |=
1438                                                 (TG3_FLAG_RX_PAUSE |
1439                                                 TG3_FLAG_TX_PAUSE);
1440                                 else if (remote_adv & LPA_PAUSE_ASYM)
1441                                         new_tg3_flags |=
1442                                                 (TG3_FLAG_RX_PAUSE);
1443                         } else {
1444                                 if (remote_adv & LPA_PAUSE_CAP)
1445                                         new_tg3_flags |=
1446                                                 (TG3_FLAG_RX_PAUSE |
1447                                                 TG3_FLAG_TX_PAUSE);
1448                         }
1449                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1450                         if ((remote_adv & LPA_PAUSE_CAP) &&
1451                         (remote_adv & LPA_PAUSE_ASYM))
1452                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1453                 }
1454
1455                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1456                 tp->tg3_flags |= new_tg3_flags;
1457         } else {
1458                 new_tg3_flags = tp->tg3_flags;
1459         }
1460
1461         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1462                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1463         else
1464                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1465
1466         if (old_rx_mode != tp->rx_mode) {
1467                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1468         }
1469         
1470         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1471                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1472         else
1473                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1474
1475         if (old_tx_mode != tp->tx_mode) {
1476                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1477         }
1478 }
1479
1480 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1481 {
1482         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1483         case MII_TG3_AUX_STAT_10HALF:
1484                 *speed = SPEED_10;
1485                 *duplex = DUPLEX_HALF;
1486                 break;
1487
1488         case MII_TG3_AUX_STAT_10FULL:
1489                 *speed = SPEED_10;
1490                 *duplex = DUPLEX_FULL;
1491                 break;
1492
1493         case MII_TG3_AUX_STAT_100HALF:
1494                 *speed = SPEED_100;
1495                 *duplex = DUPLEX_HALF;
1496                 break;
1497
1498         case MII_TG3_AUX_STAT_100FULL:
1499                 *speed = SPEED_100;
1500                 *duplex = DUPLEX_FULL;
1501                 break;
1502
1503         case MII_TG3_AUX_STAT_1000HALF:
1504                 *speed = SPEED_1000;
1505                 *duplex = DUPLEX_HALF;
1506                 break;
1507
1508         case MII_TG3_AUX_STAT_1000FULL:
1509                 *speed = SPEED_1000;
1510                 *duplex = DUPLEX_FULL;
1511                 break;
1512
1513         default:
1514                 *speed = SPEED_INVALID;
1515                 *duplex = DUPLEX_INVALID;
1516                 break;
1517         };
1518 }
1519
1520 static void tg3_phy_copper_begin(struct tg3 *tp)
1521 {
1522         u32 new_adv;
1523         int i;
1524
1525         if (tp->link_config.phy_is_low_power) {
1526                 /* Entering low power mode.  Disable gigabit and
1527                  * 100baseT advertisements.
1528                  */
1529                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1530
1531                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1532                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1533                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1534                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1535
1536                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1537         } else if (tp->link_config.speed == SPEED_INVALID) {
1538                 tp->link_config.advertising =
1539                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1540                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1541                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1542                          ADVERTISED_Autoneg | ADVERTISED_MII);
1543
1544                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1545                         tp->link_config.advertising &=
1546                                 ~(ADVERTISED_1000baseT_Half |
1547                                   ADVERTISED_1000baseT_Full);
1548
1549                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1550                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1551                         new_adv |= ADVERTISE_10HALF;
1552                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1553                         new_adv |= ADVERTISE_10FULL;
1554                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1555                         new_adv |= ADVERTISE_100HALF;
1556                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1557                         new_adv |= ADVERTISE_100FULL;
1558                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1559
1560                 if (tp->link_config.advertising &
1561                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1562                         new_adv = 0;
1563                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1564                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1565                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1566                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1567                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1568                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1569                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1570                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1571                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1572                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1573                 } else {
1574                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1575                 }
1576         } else {
1577                 /* Asking for a specific link mode. */
1578                 if (tp->link_config.speed == SPEED_1000) {
1579                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1580                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1581
1582                         if (tp->link_config.duplex == DUPLEX_FULL)
1583                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1584                         else
1585                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1586                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1587                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1588                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1589                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1590                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1591                 } else {
1592                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1593
1594                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1595                         if (tp->link_config.speed == SPEED_100) {
1596                                 if (tp->link_config.duplex == DUPLEX_FULL)
1597                                         new_adv |= ADVERTISE_100FULL;
1598                                 else
1599                                         new_adv |= ADVERTISE_100HALF;
1600                         } else {
1601                                 if (tp->link_config.duplex == DUPLEX_FULL)
1602                                         new_adv |= ADVERTISE_10FULL;
1603                                 else
1604                                         new_adv |= ADVERTISE_10HALF;
1605                         }
1606                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1607                 }
1608         }
1609
1610         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1611             tp->link_config.speed != SPEED_INVALID) {
1612                 u32 bmcr, orig_bmcr;
1613
1614                 tp->link_config.active_speed = tp->link_config.speed;
1615                 tp->link_config.active_duplex = tp->link_config.duplex;
1616
1617                 bmcr = 0;
1618                 switch (tp->link_config.speed) {
1619                 default:
1620                 case SPEED_10:
1621                         break;
1622
1623                 case SPEED_100:
1624                         bmcr |= BMCR_SPEED100;
1625                         break;
1626
1627                 case SPEED_1000:
1628                         bmcr |= TG3_BMCR_SPEED1000;
1629                         break;
1630                 };
1631
1632                 if (tp->link_config.duplex == DUPLEX_FULL)
1633                         bmcr |= BMCR_FULLDPLX;
1634
1635                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1636                     (bmcr != orig_bmcr)) {
1637                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1638                         for (i = 0; i < 1500; i++) {
1639                                 u32 tmp;
1640
1641                                 udelay(10);
1642                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1643                                     tg3_readphy(tp, MII_BMSR, &tmp))
1644                                         continue;
1645                                 if (!(tmp & BMSR_LSTATUS)) {
1646                                         udelay(40);
1647                                         break;
1648                                 }
1649                         }
1650                         tg3_writephy(tp, MII_BMCR, bmcr);
1651                         udelay(40);
1652                 }
1653         } else {
1654                 tg3_writephy(tp, MII_BMCR,
1655                              BMCR_ANENABLE | BMCR_ANRESTART);
1656         }
1657 }
1658
1659 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1660 {
1661         int err;
1662
1663         /* Turn off tap power management. */
1664         /* Set Extended packet length bit */
1665         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1666
1667         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1668         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1669
1670         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1671         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1672
1673         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1674         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1675
1676         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1677         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1678
1679         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1680         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1681
1682         udelay(40);
1683
1684         return err;
1685 }
1686
1687 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1688 {
1689         u32 adv_reg, all_mask;
1690
1691         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1692                 return 0;
1693
1694         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1695                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1696         if ((adv_reg & all_mask) != all_mask)
1697                 return 0;
1698         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1699                 u32 tg3_ctrl;
1700
1701                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1702                         return 0;
1703
1704                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1705                             MII_TG3_CTRL_ADV_1000_FULL);
1706                 if ((tg3_ctrl & all_mask) != all_mask)
1707                         return 0;
1708         }
1709         return 1;
1710 }
1711
1712 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1713 {
1714         int current_link_up;
1715         u32 bmsr, dummy;
1716         u16 current_speed;
1717         u8 current_duplex;
1718         int i, err;
1719
1720         tw32(MAC_EVENT, 0);
1721
1722         tw32_f(MAC_STATUS,
1723              (MAC_STATUS_SYNC_CHANGED |
1724               MAC_STATUS_CFG_CHANGED |
1725               MAC_STATUS_MI_COMPLETION |
1726               MAC_STATUS_LNKSTATE_CHANGED));
1727         udelay(40);
1728
1729         tp->mi_mode = MAC_MI_MODE_BASE;
1730         tw32_f(MAC_MI_MODE, tp->mi_mode);
1731         udelay(80);
1732
1733         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1734
1735         /* Some third-party PHYs need to be reset on link going
1736          * down.
1737          */
1738         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1739              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1740              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1741             netif_carrier_ok(tp->dev)) {
1742                 tg3_readphy(tp, MII_BMSR, &bmsr);
1743                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1744                     !(bmsr & BMSR_LSTATUS))
1745                         force_reset = 1;
1746         }
1747         if (force_reset)
1748                 tg3_phy_reset(tp);
1749
1750         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1751                 tg3_readphy(tp, MII_BMSR, &bmsr);
1752                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1753                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1754                         bmsr = 0;
1755
1756                 if (!(bmsr & BMSR_LSTATUS)) {
1757                         err = tg3_init_5401phy_dsp(tp);
1758                         if (err)
1759                                 return err;
1760
1761                         tg3_readphy(tp, MII_BMSR, &bmsr);
1762                         for (i = 0; i < 1000; i++) {
1763                                 udelay(10);
1764                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1765                                     (bmsr & BMSR_LSTATUS)) {
1766                                         udelay(40);
1767                                         break;
1768                                 }
1769                         }
1770
1771                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1772                             !(bmsr & BMSR_LSTATUS) &&
1773                             tp->link_config.active_speed == SPEED_1000) {
1774                                 err = tg3_phy_reset(tp);
1775                                 if (!err)
1776                                         err = tg3_init_5401phy_dsp(tp);
1777                                 if (err)
1778                                         return err;
1779                         }
1780                 }
1781         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1782                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1783                 /* 5701 {A0,B0} CRC bug workaround */
1784                 tg3_writephy(tp, 0x15, 0x0a75);
1785                 tg3_writephy(tp, 0x1c, 0x8c68);
1786                 tg3_writephy(tp, 0x1c, 0x8d68);
1787                 tg3_writephy(tp, 0x1c, 0x8c68);
1788         }
1789
1790         /* Clear pending interrupts... */
1791         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1792         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1793
1794         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1795                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1796         else
1797                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1798
1799         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1800             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1801                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1802                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1803                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1804                 else
1805                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1806         }
1807
1808         current_link_up = 0;
1809         current_speed = SPEED_INVALID;
1810         current_duplex = DUPLEX_INVALID;
1811
1812         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1813                 u32 val;
1814
1815                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1816                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1817                 if (!(val & (1 << 10))) {
1818                         val |= (1 << 10);
1819                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1820                         goto relink;
1821                 }
1822         }
1823
1824         bmsr = 0;
1825         for (i = 0; i < 100; i++) {
1826                 tg3_readphy(tp, MII_BMSR, &bmsr);
1827                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1828                     (bmsr & BMSR_LSTATUS))
1829                         break;
1830                 udelay(40);
1831         }
1832
1833         if (bmsr & BMSR_LSTATUS) {
1834                 u32 aux_stat, bmcr;
1835
1836                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1837                 for (i = 0; i < 2000; i++) {
1838                         udelay(10);
1839                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1840                             aux_stat)
1841                                 break;
1842                 }
1843
1844                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1845                                              &current_speed,
1846                                              &current_duplex);
1847
1848                 bmcr = 0;
1849                 for (i = 0; i < 200; i++) {
1850                         tg3_readphy(tp, MII_BMCR, &bmcr);
1851                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1852                                 continue;
1853                         if (bmcr && bmcr != 0x7fff)
1854                                 break;
1855                         udelay(10);
1856                 }
1857
1858                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1859                         if (bmcr & BMCR_ANENABLE) {
1860                                 current_link_up = 1;
1861
1862                                 /* Force autoneg restart if we are exiting
1863                                  * low power mode.
1864                                  */
1865                                 if (!tg3_copper_is_advertising_all(tp))
1866                                         current_link_up = 0;
1867                         } else {
1868                                 current_link_up = 0;
1869                         }
1870                 } else {
1871                         if (!(bmcr & BMCR_ANENABLE) &&
1872                             tp->link_config.speed == current_speed &&
1873                             tp->link_config.duplex == current_duplex) {
1874                                 current_link_up = 1;
1875                         } else {
1876                                 current_link_up = 0;
1877                         }
1878                 }
1879
1880                 tp->link_config.active_speed = current_speed;
1881                 tp->link_config.active_duplex = current_duplex;
1882         }
1883
1884         if (current_link_up == 1 &&
1885             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1886             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1887                 u32 local_adv, remote_adv;
1888
1889                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1890                         local_adv = 0;
1891                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1892
1893                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1894                         remote_adv = 0;
1895
1896                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1897
1898                 /* If we are not advertising full pause capability,
1899                  * something is wrong.  Bring the link down and reconfigure.
1900                  */
1901                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1902                         current_link_up = 0;
1903                 } else {
1904                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1905                 }
1906         }
1907 relink:
1908         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1909                 u32 tmp;
1910
1911                 tg3_phy_copper_begin(tp);
1912
1913                 tg3_readphy(tp, MII_BMSR, &tmp);
1914                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1915                     (tmp & BMSR_LSTATUS))
1916                         current_link_up = 1;
1917         }
1918
1919         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1920         if (current_link_up == 1) {
1921                 if (tp->link_config.active_speed == SPEED_100 ||
1922                     tp->link_config.active_speed == SPEED_10)
1923                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1924                 else
1925                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1926         } else
1927                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1928
1929         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1930         if (tp->link_config.active_duplex == DUPLEX_HALF)
1931                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1932
1933         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1935                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1936                     (current_link_up == 1 &&
1937                      tp->link_config.active_speed == SPEED_10))
1938                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1939         } else {
1940                 if (current_link_up == 1)
1941                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1942         }
1943
1944         /* ??? Without this setting Netgear GA302T PHY does not
1945          * ??? send/receive packets...
1946          */
1947         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1948             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1949                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1950                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1951                 udelay(80);
1952         }
1953
1954         tw32_f(MAC_MODE, tp->mac_mode);
1955         udelay(40);
1956
1957         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1958                 /* Polled via timer. */
1959                 tw32_f(MAC_EVENT, 0);
1960         } else {
1961                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1962         }
1963         udelay(40);
1964
1965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1966             current_link_up == 1 &&
1967             tp->link_config.active_speed == SPEED_1000 &&
1968             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1969              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1970                 udelay(120);
1971                 tw32_f(MAC_STATUS,
1972                      (MAC_STATUS_SYNC_CHANGED |
1973                       MAC_STATUS_CFG_CHANGED));
1974                 udelay(40);
1975                 tg3_write_mem(tp,
1976                               NIC_SRAM_FIRMWARE_MBOX,
1977                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1978         }
1979
1980         if (current_link_up != netif_carrier_ok(tp->dev)) {
1981                 if (current_link_up)
1982                         netif_carrier_on(tp->dev);
1983                 else
1984                         netif_carrier_off(tp->dev);
1985                 tg3_link_report(tp);
1986         }
1987
1988         return 0;
1989 }
1990
1991 struct tg3_fiber_aneginfo {
1992         int state;
1993 #define ANEG_STATE_UNKNOWN              0
1994 #define ANEG_STATE_AN_ENABLE            1
1995 #define ANEG_STATE_RESTART_INIT         2
1996 #define ANEG_STATE_RESTART              3
1997 #define ANEG_STATE_DISABLE_LINK_OK      4
1998 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1999 #define ANEG_STATE_ABILITY_DETECT       6
2000 #define ANEG_STATE_ACK_DETECT_INIT      7
2001 #define ANEG_STATE_ACK_DETECT           8
2002 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2003 #define ANEG_STATE_COMPLETE_ACK         10
2004 #define ANEG_STATE_IDLE_DETECT_INIT     11
2005 #define ANEG_STATE_IDLE_DETECT          12
2006 #define ANEG_STATE_LINK_OK              13
2007 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2008 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2009
2010         u32 flags;
2011 #define MR_AN_ENABLE            0x00000001
2012 #define MR_RESTART_AN           0x00000002
2013 #define MR_AN_COMPLETE          0x00000004
2014 #define MR_PAGE_RX              0x00000008
2015 #define MR_NP_LOADED            0x00000010
2016 #define MR_TOGGLE_TX            0x00000020
2017 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2018 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2019 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2020 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2021 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2022 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2023 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2024 #define MR_TOGGLE_RX            0x00002000
2025 #define MR_NP_RX                0x00004000
2026
2027 #define MR_LINK_OK              0x80000000
2028
2029         unsigned long link_time, cur_time;
2030
2031         u32 ability_match_cfg;
2032         int ability_match_count;
2033
2034         char ability_match, idle_match, ack_match;
2035
2036         u32 txconfig, rxconfig;
2037 #define ANEG_CFG_NP             0x00000080
2038 #define ANEG_CFG_ACK            0x00000040
2039 #define ANEG_CFG_RF2            0x00000020
2040 #define ANEG_CFG_RF1            0x00000010
2041 #define ANEG_CFG_PS2            0x00000001
2042 #define ANEG_CFG_PS1            0x00008000
2043 #define ANEG_CFG_HD             0x00004000
2044 #define ANEG_CFG_FD             0x00002000
2045 #define ANEG_CFG_INVAL          0x00001f06
2046
2047 };
2048 #define ANEG_OK         0
2049 #define ANEG_DONE       1
2050 #define ANEG_TIMER_ENAB 2
2051 #define ANEG_FAILED     -1
2052
2053 #define ANEG_STATE_SETTLE_TIME  10000
2054
2055 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2056                                    struct tg3_fiber_aneginfo *ap)
2057 {
2058         unsigned long delta;
2059         u32 rx_cfg_reg;
2060         int ret;
2061
2062         if (ap->state == ANEG_STATE_UNKNOWN) {
2063                 ap->rxconfig = 0;
2064                 ap->link_time = 0;
2065                 ap->cur_time = 0;
2066                 ap->ability_match_cfg = 0;
2067                 ap->ability_match_count = 0;
2068                 ap->ability_match = 0;
2069                 ap->idle_match = 0;
2070                 ap->ack_match = 0;
2071         }
2072         ap->cur_time++;
2073
2074         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2075                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2076
2077                 if (rx_cfg_reg != ap->ability_match_cfg) {
2078                         ap->ability_match_cfg = rx_cfg_reg;
2079                         ap->ability_match = 0;
2080                         ap->ability_match_count = 0;
2081                 } else {
2082                         if (++ap->ability_match_count > 1) {
2083                                 ap->ability_match = 1;
2084                                 ap->ability_match_cfg = rx_cfg_reg;
2085                         }
2086                 }
2087                 if (rx_cfg_reg & ANEG_CFG_ACK)
2088                         ap->ack_match = 1;
2089                 else
2090                         ap->ack_match = 0;
2091
2092                 ap->idle_match = 0;
2093         } else {
2094                 ap->idle_match = 1;
2095                 ap->ability_match_cfg = 0;
2096                 ap->ability_match_count = 0;
2097                 ap->ability_match = 0;
2098                 ap->ack_match = 0;
2099
2100                 rx_cfg_reg = 0;
2101         }
2102
2103         ap->rxconfig = rx_cfg_reg;
2104         ret = ANEG_OK;
2105
2106         switch(ap->state) {
2107         case ANEG_STATE_UNKNOWN:
2108                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2109                         ap->state = ANEG_STATE_AN_ENABLE;
2110
2111                 /* fallthru */
2112         case ANEG_STATE_AN_ENABLE:
2113                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2114                 if (ap->flags & MR_AN_ENABLE) {
2115                         ap->link_time = 0;
2116                         ap->cur_time = 0;
2117                         ap->ability_match_cfg = 0;
2118                         ap->ability_match_count = 0;
2119                         ap->ability_match = 0;
2120                         ap->idle_match = 0;
2121                         ap->ack_match = 0;
2122
2123                         ap->state = ANEG_STATE_RESTART_INIT;
2124                 } else {
2125                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2126                 }
2127                 break;
2128
2129         case ANEG_STATE_RESTART_INIT:
2130                 ap->link_time = ap->cur_time;
2131                 ap->flags &= ~(MR_NP_LOADED);
2132                 ap->txconfig = 0;
2133                 tw32(MAC_TX_AUTO_NEG, 0);
2134                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2135                 tw32_f(MAC_MODE, tp->mac_mode);
2136                 udelay(40);
2137
2138                 ret = ANEG_TIMER_ENAB;
2139                 ap->state = ANEG_STATE_RESTART;
2140
2141                 /* fallthru */
2142         case ANEG_STATE_RESTART:
2143                 delta = ap->cur_time - ap->link_time;
2144                 if (delta > ANEG_STATE_SETTLE_TIME) {
2145                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2146                 } else {
2147                         ret = ANEG_TIMER_ENAB;
2148                 }
2149                 break;
2150
2151         case ANEG_STATE_DISABLE_LINK_OK:
2152                 ret = ANEG_DONE;
2153                 break;
2154
2155         case ANEG_STATE_ABILITY_DETECT_INIT:
2156                 ap->flags &= ~(MR_TOGGLE_TX);
2157                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2158                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2159                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2160                 tw32_f(MAC_MODE, tp->mac_mode);
2161                 udelay(40);
2162
2163                 ap->state = ANEG_STATE_ABILITY_DETECT;
2164                 break;
2165
2166         case ANEG_STATE_ABILITY_DETECT:
2167                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2168                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2169                 }
2170                 break;
2171
2172         case ANEG_STATE_ACK_DETECT_INIT:
2173                 ap->txconfig |= ANEG_CFG_ACK;
2174                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2175                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2176                 tw32_f(MAC_MODE, tp->mac_mode);
2177                 udelay(40);
2178
2179                 ap->state = ANEG_STATE_ACK_DETECT;
2180
2181                 /* fallthru */
2182         case ANEG_STATE_ACK_DETECT:
2183                 if (ap->ack_match != 0) {
2184                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2185                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2186                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2187                         } else {
2188                                 ap->state = ANEG_STATE_AN_ENABLE;
2189                         }
2190                 } else if (ap->ability_match != 0 &&
2191                            ap->rxconfig == 0) {
2192                         ap->state = ANEG_STATE_AN_ENABLE;
2193                 }
2194                 break;
2195
2196         case ANEG_STATE_COMPLETE_ACK_INIT:
2197                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2198                         ret = ANEG_FAILED;
2199                         break;
2200                 }
2201                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2202                                MR_LP_ADV_HALF_DUPLEX |
2203                                MR_LP_ADV_SYM_PAUSE |
2204                                MR_LP_ADV_ASYM_PAUSE |
2205                                MR_LP_ADV_REMOTE_FAULT1 |
2206                                MR_LP_ADV_REMOTE_FAULT2 |
2207                                MR_LP_ADV_NEXT_PAGE |
2208                                MR_TOGGLE_RX |
2209                                MR_NP_RX);
2210                 if (ap->rxconfig & ANEG_CFG_FD)
2211                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2212                 if (ap->rxconfig & ANEG_CFG_HD)
2213                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2214                 if (ap->rxconfig & ANEG_CFG_PS1)
2215                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2216                 if (ap->rxconfig & ANEG_CFG_PS2)
2217                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2218                 if (ap->rxconfig & ANEG_CFG_RF1)
2219                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2220                 if (ap->rxconfig & ANEG_CFG_RF2)
2221                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2222                 if (ap->rxconfig & ANEG_CFG_NP)
2223                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2224
2225                 ap->link_time = ap->cur_time;
2226
2227                 ap->flags ^= (MR_TOGGLE_TX);
2228                 if (ap->rxconfig & 0x0008)
2229                         ap->flags |= MR_TOGGLE_RX;
2230                 if (ap->rxconfig & ANEG_CFG_NP)
2231                         ap->flags |= MR_NP_RX;
2232                 ap->flags |= MR_PAGE_RX;
2233
2234                 ap->state = ANEG_STATE_COMPLETE_ACK;
2235                 ret = ANEG_TIMER_ENAB;
2236                 break;
2237
2238         case ANEG_STATE_COMPLETE_ACK:
2239                 if (ap->ability_match != 0 &&
2240                     ap->rxconfig == 0) {
2241                         ap->state = ANEG_STATE_AN_ENABLE;
2242                         break;
2243                 }
2244                 delta = ap->cur_time - ap->link_time;
2245                 if (delta > ANEG_STATE_SETTLE_TIME) {
2246                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2247                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2248                         } else {
2249                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2250                                     !(ap->flags & MR_NP_RX)) {
2251                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2252                                 } else {
2253                                         ret = ANEG_FAILED;
2254                                 }
2255                         }
2256                 }
2257                 break;
2258
2259         case ANEG_STATE_IDLE_DETECT_INIT:
2260                 ap->link_time = ap->cur_time;
2261                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2262                 tw32_f(MAC_MODE, tp->mac_mode);
2263                 udelay(40);
2264
2265                 ap->state = ANEG_STATE_IDLE_DETECT;
2266                 ret = ANEG_TIMER_ENAB;
2267                 break;
2268
2269         case ANEG_STATE_IDLE_DETECT:
2270                 if (ap->ability_match != 0 &&
2271                     ap->rxconfig == 0) {
2272                         ap->state = ANEG_STATE_AN_ENABLE;
2273                         break;
2274                 }
2275                 delta = ap->cur_time - ap->link_time;
2276                 if (delta > ANEG_STATE_SETTLE_TIME) {
2277                         /* XXX another gem from the Broadcom driver :( */
2278                         ap->state = ANEG_STATE_LINK_OK;
2279                 }
2280                 break;
2281
2282         case ANEG_STATE_LINK_OK:
2283                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2284                 ret = ANEG_DONE;
2285                 break;
2286
2287         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2288                 /* ??? unimplemented */
2289                 break;
2290
2291         case ANEG_STATE_NEXT_PAGE_WAIT:
2292                 /* ??? unimplemented */
2293                 break;
2294
2295         default:
2296                 ret = ANEG_FAILED;
2297                 break;
2298         };
2299
2300         return ret;
2301 }
2302
2303 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2304 {
2305         int res = 0;
2306         struct tg3_fiber_aneginfo aninfo;
2307         int status = ANEG_FAILED;
2308         unsigned int tick;
2309         u32 tmp;
2310
2311         tw32_f(MAC_TX_AUTO_NEG, 0);
2312
2313         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2314         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2315         udelay(40);
2316
2317         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2318         udelay(40);
2319
2320         memset(&aninfo, 0, sizeof(aninfo));
2321         aninfo.flags |= MR_AN_ENABLE;
2322         aninfo.state = ANEG_STATE_UNKNOWN;
2323         aninfo.cur_time = 0;
2324         tick = 0;
2325         while (++tick < 195000) {
2326                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2327                 if (status == ANEG_DONE || status == ANEG_FAILED)
2328                         break;
2329
2330                 udelay(1);
2331         }
2332
2333         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2334         tw32_f(MAC_MODE, tp->mac_mode);
2335         udelay(40);
2336
2337         *flags = aninfo.flags;
2338
2339         if (status == ANEG_DONE &&
2340             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2341                              MR_LP_ADV_FULL_DUPLEX)))
2342                 res = 1;
2343
2344         return res;
2345 }
2346
2347 static void tg3_init_bcm8002(struct tg3 *tp)
2348 {
2349         u32 mac_status = tr32(MAC_STATUS);
2350         int i;
2351
2352         /* Reset when initting first time or we have a link. */
2353         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2354             !(mac_status & MAC_STATUS_PCS_SYNCED))
2355                 return;
2356
2357         /* Set PLL lock range. */
2358         tg3_writephy(tp, 0x16, 0x8007);
2359
2360         /* SW reset */
2361         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2362
2363         /* Wait for reset to complete. */
2364         /* XXX schedule_timeout() ... */
2365         for (i = 0; i < 500; i++)
2366                 udelay(10);
2367
2368         /* Config mode; select PMA/Ch 1 regs. */
2369         tg3_writephy(tp, 0x10, 0x8411);
2370
2371         /* Enable auto-lock and comdet, select txclk for tx. */
2372         tg3_writephy(tp, 0x11, 0x0a10);
2373
2374         tg3_writephy(tp, 0x18, 0x00a0);
2375         tg3_writephy(tp, 0x16, 0x41ff);
2376
2377         /* Assert and deassert POR. */
2378         tg3_writephy(tp, 0x13, 0x0400);
2379         udelay(40);
2380         tg3_writephy(tp, 0x13, 0x0000);
2381
2382         tg3_writephy(tp, 0x11, 0x0a50);
2383         udelay(40);
2384         tg3_writephy(tp, 0x11, 0x0a10);
2385
2386         /* Wait for signal to stabilize */
2387         /* XXX schedule_timeout() ... */
2388         for (i = 0; i < 15000; i++)
2389                 udelay(10);
2390
2391         /* Deselect the channel register so we can read the PHYID
2392          * later.
2393          */
2394         tg3_writephy(tp, 0x10, 0x8011);
2395 }
2396
2397 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2398 {
2399         u32 sg_dig_ctrl, sg_dig_status;
2400         u32 serdes_cfg, expected_sg_dig_ctrl;
2401         int workaround, port_a;
2402         int current_link_up;
2403
2404         serdes_cfg = 0;
2405         expected_sg_dig_ctrl = 0;
2406         workaround = 0;
2407         port_a = 1;
2408         current_link_up = 0;
2409
2410         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2411             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2412                 workaround = 1;
2413                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2414                         port_a = 0;
2415
2416                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2417                 /* preserve bits 20-23 for voltage regulator */
2418                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2419         }
2420
2421         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2422
2423         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2424                 if (sg_dig_ctrl & (1 << 31)) {
2425                         if (workaround) {
2426                                 u32 val = serdes_cfg;
2427
2428                                 if (port_a)
2429                                         val |= 0xc010000;
2430                                 else
2431                                         val |= 0x4010000;
2432                                 tw32_f(MAC_SERDES_CFG, val);
2433                         }
2434                         tw32_f(SG_DIG_CTRL, 0x01388400);
2435                 }
2436                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2437                         tg3_setup_flow_control(tp, 0, 0);
2438                         current_link_up = 1;
2439                 }
2440                 goto out;
2441         }
2442
2443         /* Want auto-negotiation.  */
2444         expected_sg_dig_ctrl = 0x81388400;
2445
2446         /* Pause capability */
2447         expected_sg_dig_ctrl |= (1 << 11);
2448
2449         /* Asymettric pause */
2450         expected_sg_dig_ctrl |= (1 << 12);
2451
2452         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2453                 if (workaround)
2454                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2455                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2456                 udelay(5);
2457                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2458
2459                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2460         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2461                                  MAC_STATUS_SIGNAL_DET)) {
2462                 int i;
2463
2464                 /* Giver time to negotiate (~200ms) */
2465                 for (i = 0; i < 40000; i++) {
2466                         sg_dig_status = tr32(SG_DIG_STATUS);
2467                         if (sg_dig_status & (0x3))
2468                                 break;
2469                         udelay(5);
2470                 }
2471                 mac_status = tr32(MAC_STATUS);
2472
2473                 if ((sg_dig_status & (1 << 1)) &&
2474                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2475                         u32 local_adv, remote_adv;
2476
2477                         local_adv = ADVERTISE_PAUSE_CAP;
2478                         remote_adv = 0;
2479                         if (sg_dig_status & (1 << 19))
2480                                 remote_adv |= LPA_PAUSE_CAP;
2481                         if (sg_dig_status & (1 << 20))
2482                                 remote_adv |= LPA_PAUSE_ASYM;
2483
2484                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2485                         current_link_up = 1;
2486                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2487                 } else if (!(sg_dig_status & (1 << 1))) {
2488                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2489                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2490                         else {
2491                                 if (workaround) {
2492                                         u32 val = serdes_cfg;
2493
2494                                         if (port_a)
2495                                                 val |= 0xc010000;
2496                                         else
2497                                                 val |= 0x4010000;
2498
2499                                         tw32_f(MAC_SERDES_CFG, val);
2500                                 }
2501
2502                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2503                                 udelay(40);
2504
2505                                 /* Link parallel detection - link is up */
2506                                 /* only if we have PCS_SYNC and not */
2507                                 /* receiving config code words */
2508                                 mac_status = tr32(MAC_STATUS);
2509                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2510                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2511                                         tg3_setup_flow_control(tp, 0, 0);
2512                                         current_link_up = 1;
2513                                 }
2514                         }
2515                 }
2516         }
2517
2518 out:
2519         return current_link_up;
2520 }
2521
2522 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2523 {
2524         int current_link_up = 0;
2525
2526         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2527                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2528                 goto out;
2529         }
2530
2531         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2532                 u32 flags;
2533                 int i;
2534   
2535                 if (fiber_autoneg(tp, &flags)) {
2536                         u32 local_adv, remote_adv;
2537
2538                         local_adv = ADVERTISE_PAUSE_CAP;
2539                         remote_adv = 0;
2540                         if (flags & MR_LP_ADV_SYM_PAUSE)
2541                                 remote_adv |= LPA_PAUSE_CAP;
2542                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2543                                 remote_adv |= LPA_PAUSE_ASYM;
2544
2545                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2546
2547                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2548                         current_link_up = 1;
2549                 }
2550                 for (i = 0; i < 30; i++) {
2551                         udelay(20);
2552                         tw32_f(MAC_STATUS,
2553                                (MAC_STATUS_SYNC_CHANGED |
2554                                 MAC_STATUS_CFG_CHANGED));
2555                         udelay(40);
2556                         if ((tr32(MAC_STATUS) &
2557                              (MAC_STATUS_SYNC_CHANGED |
2558                               MAC_STATUS_CFG_CHANGED)) == 0)
2559                                 break;
2560                 }
2561
2562                 mac_status = tr32(MAC_STATUS);
2563                 if (current_link_up == 0 &&
2564                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2565                     !(mac_status & MAC_STATUS_RCVD_CFG))
2566                         current_link_up = 1;
2567         } else {
2568                 /* Forcing 1000FD link up. */
2569                 current_link_up = 1;
2570                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2571
2572                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2573                 udelay(40);
2574         }
2575
2576 out:
2577         return current_link_up;
2578 }
2579
2580 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2581 {
2582         u32 orig_pause_cfg;
2583         u16 orig_active_speed;
2584         u8 orig_active_duplex;
2585         u32 mac_status;
2586         int current_link_up;
2587         int i;
2588
2589         orig_pause_cfg =
2590                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2591                                   TG3_FLAG_TX_PAUSE));
2592         orig_active_speed = tp->link_config.active_speed;
2593         orig_active_duplex = tp->link_config.active_duplex;
2594
2595         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2596             netif_carrier_ok(tp->dev) &&
2597             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2598                 mac_status = tr32(MAC_STATUS);
2599                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2600                                MAC_STATUS_SIGNAL_DET |
2601                                MAC_STATUS_CFG_CHANGED |
2602                                MAC_STATUS_RCVD_CFG);
2603                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2604                                    MAC_STATUS_SIGNAL_DET)) {
2605                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2606                                             MAC_STATUS_CFG_CHANGED));
2607                         return 0;
2608                 }
2609         }
2610
2611         tw32_f(MAC_TX_AUTO_NEG, 0);
2612
2613         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2614         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2615         tw32_f(MAC_MODE, tp->mac_mode);
2616         udelay(40);
2617
2618         if (tp->phy_id == PHY_ID_BCM8002)
2619                 tg3_init_bcm8002(tp);
2620
2621         /* Enable link change event even when serdes polling.  */
2622         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2623         udelay(40);
2624
2625         current_link_up = 0;
2626         mac_status = tr32(MAC_STATUS);
2627
2628         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2629                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2630         else
2631                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2632
2633         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2634         tw32_f(MAC_MODE, tp->mac_mode);
2635         udelay(40);
2636
2637         tp->hw_status->status =
2638                 (SD_STATUS_UPDATED |
2639                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2640
2641         for (i = 0; i < 100; i++) {
2642                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2643                                     MAC_STATUS_CFG_CHANGED));
2644                 udelay(5);
2645                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2646                                          MAC_STATUS_CFG_CHANGED)) == 0)
2647                         break;
2648         }
2649
2650         mac_status = tr32(MAC_STATUS);
2651         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2652                 current_link_up = 0;
2653                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2654                         tw32_f(MAC_MODE, (tp->mac_mode |
2655                                           MAC_MODE_SEND_CONFIGS));
2656                         udelay(1);
2657                         tw32_f(MAC_MODE, tp->mac_mode);
2658                 }
2659         }
2660
2661         if (current_link_up == 1) {
2662                 tp->link_config.active_speed = SPEED_1000;
2663                 tp->link_config.active_duplex = DUPLEX_FULL;
2664                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2665                                     LED_CTRL_LNKLED_OVERRIDE |
2666                                     LED_CTRL_1000MBPS_ON));
2667         } else {
2668                 tp->link_config.active_speed = SPEED_INVALID;
2669                 tp->link_config.active_duplex = DUPLEX_INVALID;
2670                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2671                                     LED_CTRL_LNKLED_OVERRIDE |
2672                                     LED_CTRL_TRAFFIC_OVERRIDE));
2673         }
2674
2675         if (current_link_up != netif_carrier_ok(tp->dev)) {
2676                 if (current_link_up)
2677                         netif_carrier_on(tp->dev);
2678                 else
2679                         netif_carrier_off(tp->dev);
2680                 tg3_link_report(tp);
2681         } else {
2682                 u32 now_pause_cfg =
2683                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2684                                          TG3_FLAG_TX_PAUSE);
2685                 if (orig_pause_cfg != now_pause_cfg ||
2686                     orig_active_speed != tp->link_config.active_speed ||
2687                     orig_active_duplex != tp->link_config.active_duplex)
2688                         tg3_link_report(tp);
2689         }
2690
2691         return 0;
2692 }
2693
2694 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2695 {
2696         int current_link_up, err = 0;
2697         u32 bmsr, bmcr;
2698         u16 current_speed;
2699         u8 current_duplex;
2700
2701         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2702         tw32_f(MAC_MODE, tp->mac_mode);
2703         udelay(40);
2704
2705         tw32(MAC_EVENT, 0);
2706
2707         tw32_f(MAC_STATUS,
2708              (MAC_STATUS_SYNC_CHANGED |
2709               MAC_STATUS_CFG_CHANGED |
2710               MAC_STATUS_MI_COMPLETION |
2711               MAC_STATUS_LNKSTATE_CHANGED));
2712         udelay(40);
2713
2714         if (force_reset)
2715                 tg3_phy_reset(tp);
2716
2717         current_link_up = 0;
2718         current_speed = SPEED_INVALID;
2719         current_duplex = DUPLEX_INVALID;
2720
2721         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2722         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2723         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2724                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2725                         bmsr |= BMSR_LSTATUS;
2726                 else
2727                         bmsr &= ~BMSR_LSTATUS;
2728         }
2729
2730         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2731
2732         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2733             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2734                 /* do nothing, just check for link up at the end */
2735         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2736                 u32 adv, new_adv;
2737
2738                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2739                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2740                                   ADVERTISE_1000XPAUSE |
2741                                   ADVERTISE_1000XPSE_ASYM |
2742                                   ADVERTISE_SLCT);
2743
2744                 /* Always advertise symmetric PAUSE just like copper */
2745                 new_adv |= ADVERTISE_1000XPAUSE;
2746
2747                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2748                         new_adv |= ADVERTISE_1000XHALF;
2749                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2750                         new_adv |= ADVERTISE_1000XFULL;
2751
2752                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2753                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2754                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2755                         tg3_writephy(tp, MII_BMCR, bmcr);
2756
2757                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2758                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2759                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2760
2761                         return err;
2762                 }
2763         } else {
2764                 u32 new_bmcr;
2765
2766                 bmcr &= ~BMCR_SPEED1000;
2767                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2768
2769                 if (tp->link_config.duplex == DUPLEX_FULL)
2770                         new_bmcr |= BMCR_FULLDPLX;
2771
2772                 if (new_bmcr != bmcr) {
2773                         /* BMCR_SPEED1000 is a reserved bit that needs
2774                          * to be set on write.
2775                          */
2776                         new_bmcr |= BMCR_SPEED1000;
2777
2778                         /* Force a linkdown */
2779                         if (netif_carrier_ok(tp->dev)) {
2780                                 u32 adv;
2781
2782                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2783                                 adv &= ~(ADVERTISE_1000XFULL |
2784                                          ADVERTISE_1000XHALF |
2785                                          ADVERTISE_SLCT);
2786                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2787                                 tg3_writephy(tp, MII_BMCR, bmcr |
2788                                                            BMCR_ANRESTART |
2789                                                            BMCR_ANENABLE);
2790                                 udelay(10);
2791                                 netif_carrier_off(tp->dev);
2792                         }
2793                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2794                         bmcr = new_bmcr;
2795                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2796                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2797                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2798                             ASIC_REV_5714) {
2799                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2800                                         bmsr |= BMSR_LSTATUS;
2801                                 else
2802                                         bmsr &= ~BMSR_LSTATUS;
2803                         }
2804                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2805                 }
2806         }
2807
2808         if (bmsr & BMSR_LSTATUS) {
2809                 current_speed = SPEED_1000;
2810                 current_link_up = 1;
2811                 if (bmcr & BMCR_FULLDPLX)
2812                         current_duplex = DUPLEX_FULL;
2813                 else
2814                         current_duplex = DUPLEX_HALF;
2815
2816                 if (bmcr & BMCR_ANENABLE) {
2817                         u32 local_adv, remote_adv, common;
2818
2819                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2820                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2821                         common = local_adv & remote_adv;
2822                         if (common & (ADVERTISE_1000XHALF |
2823                                       ADVERTISE_1000XFULL)) {
2824                                 if (common & ADVERTISE_1000XFULL)
2825                                         current_duplex = DUPLEX_FULL;
2826                                 else
2827                                         current_duplex = DUPLEX_HALF;
2828
2829                                 tg3_setup_flow_control(tp, local_adv,
2830                                                        remote_adv);
2831                         }
2832                         else
2833                                 current_link_up = 0;
2834                 }
2835         }
2836
2837         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2838         if (tp->link_config.active_duplex == DUPLEX_HALF)
2839                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2840
2841         tw32_f(MAC_MODE, tp->mac_mode);
2842         udelay(40);
2843
2844         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2845
2846         tp->link_config.active_speed = current_speed;
2847         tp->link_config.active_duplex = current_duplex;
2848
2849         if (current_link_up != netif_carrier_ok(tp->dev)) {
2850                 if (current_link_up)
2851                         netif_carrier_on(tp->dev);
2852                 else {
2853                         netif_carrier_off(tp->dev);
2854                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2855                 }
2856                 tg3_link_report(tp);
2857         }
2858         return err;
2859 }
2860
2861 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2862 {
2863         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2864                 /* Give autoneg time to complete. */
2865                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2866                 return;
2867         }
2868         if (!netif_carrier_ok(tp->dev) &&
2869             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2870                 u32 bmcr;
2871
2872                 tg3_readphy(tp, MII_BMCR, &bmcr);
2873                 if (bmcr & BMCR_ANENABLE) {
2874                         u32 phy1, phy2;
2875
2876                         /* Select shadow register 0x1f */
2877                         tg3_writephy(tp, 0x1c, 0x7c00);
2878                         tg3_readphy(tp, 0x1c, &phy1);
2879
2880                         /* Select expansion interrupt status register */
2881                         tg3_writephy(tp, 0x17, 0x0f01);
2882                         tg3_readphy(tp, 0x15, &phy2);
2883                         tg3_readphy(tp, 0x15, &phy2);
2884
2885                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2886                                 /* We have signal detect and not receiving
2887                                  * config code words, link is up by parallel
2888                                  * detection.
2889                                  */
2890
2891                                 bmcr &= ~BMCR_ANENABLE;
2892                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2893                                 tg3_writephy(tp, MII_BMCR, bmcr);
2894                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2895                         }
2896                 }
2897         }
2898         else if (netif_carrier_ok(tp->dev) &&
2899                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2900                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2901                 u32 phy2;
2902
2903                 /* Select expansion interrupt status register */
2904                 tg3_writephy(tp, 0x17, 0x0f01);
2905                 tg3_readphy(tp, 0x15, &phy2);
2906                 if (phy2 & 0x20) {
2907                         u32 bmcr;
2908
2909                         /* Config code words received, turn on autoneg. */
2910                         tg3_readphy(tp, MII_BMCR, &bmcr);
2911                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2912
2913                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2914
2915                 }
2916         }
2917 }
2918
2919 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2920 {
2921         int err;
2922
2923         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2924                 err = tg3_setup_fiber_phy(tp, force_reset);
2925         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2926                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2927         } else {
2928                 err = tg3_setup_copper_phy(tp, force_reset);
2929         }
2930
2931         if (tp->link_config.active_speed == SPEED_1000 &&
2932             tp->link_config.active_duplex == DUPLEX_HALF)
2933                 tw32(MAC_TX_LENGTHS,
2934                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2935                       (6 << TX_LENGTHS_IPG_SHIFT) |
2936                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2937         else
2938                 tw32(MAC_TX_LENGTHS,
2939                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2940                       (6 << TX_LENGTHS_IPG_SHIFT) |
2941                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2942
2943         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2944                 if (netif_carrier_ok(tp->dev)) {
2945                         tw32(HOSTCC_STAT_COAL_TICKS,
2946                              tp->coal.stats_block_coalesce_usecs);
2947                 } else {
2948                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2949                 }
2950         }
2951
2952         return err;
2953 }
2954
2955 /* Tigon3 never reports partial packet sends.  So we do not
2956  * need special logic to handle SKBs that have not had all
2957  * of their frags sent yet, like SunGEM does.
2958  */
2959 static void tg3_tx(struct tg3 *tp)
2960 {
2961         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2962         u32 sw_idx = tp->tx_cons;
2963
2964         while (sw_idx != hw_idx) {
2965                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2966                 struct sk_buff *skb = ri->skb;
2967                 int i;
2968
2969                 BUG_ON(skb == NULL);
2970                 pci_unmap_single(tp->pdev,
2971                                  pci_unmap_addr(ri, mapping),
2972                                  skb_headlen(skb),
2973                                  PCI_DMA_TODEVICE);
2974
2975                 ri->skb = NULL;
2976
2977                 sw_idx = NEXT_TX(sw_idx);
2978
2979                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2980                         BUG_ON(sw_idx == hw_idx);
2981
2982                         ri = &tp->tx_buffers[sw_idx];
2983                         BUG_ON(ri->skb != NULL);
2984
2985                         pci_unmap_page(tp->pdev,
2986                                        pci_unmap_addr(ri, mapping),
2987                                        skb_shinfo(skb)->frags[i].size,
2988                                        PCI_DMA_TODEVICE);
2989
2990                         sw_idx = NEXT_TX(sw_idx);
2991                 }
2992
2993                 dev_kfree_skb(skb);
2994         }
2995
2996         tp->tx_cons = sw_idx;
2997
2998         if (unlikely(netif_queue_stopped(tp->dev))) {
2999                 spin_lock(&tp->tx_lock);
3000                 if (netif_queue_stopped(tp->dev) &&
3001                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3002                         netif_wake_queue(tp->dev);
3003                 spin_unlock(&tp->tx_lock);
3004         }
3005 }
3006
3007 /* Returns size of skb allocated or < 0 on error.
3008  *
3009  * We only need to fill in the address because the other members
3010  * of the RX descriptor are invariant, see tg3_init_rings.
3011  *
3012  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3013  * posting buffers we only dirty the first cache line of the RX
3014  * descriptor (containing the address).  Whereas for the RX status
3015  * buffers the cpu only reads the last cacheline of the RX descriptor
3016  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3017  */
3018 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3019                             int src_idx, u32 dest_idx_unmasked)
3020 {
3021         struct tg3_rx_buffer_desc *desc;
3022         struct ring_info *map, *src_map;
3023         struct sk_buff *skb;
3024         dma_addr_t mapping;
3025         int skb_size, dest_idx;
3026
3027         src_map = NULL;
3028         switch (opaque_key) {
3029         case RXD_OPAQUE_RING_STD:
3030                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3031                 desc = &tp->rx_std[dest_idx];
3032                 map = &tp->rx_std_buffers[dest_idx];
3033                 if (src_idx >= 0)
3034                         src_map = &tp->rx_std_buffers[src_idx];
3035                 skb_size = tp->rx_pkt_buf_sz;
3036                 break;
3037
3038         case RXD_OPAQUE_RING_JUMBO:
3039                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3040                 desc = &tp->rx_jumbo[dest_idx];
3041                 map = &tp->rx_jumbo_buffers[dest_idx];
3042                 if (src_idx >= 0)
3043                         src_map = &tp->rx_jumbo_buffers[src_idx];
3044                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3045                 break;
3046
3047         default:
3048                 return -EINVAL;
3049         };
3050
3051         /* Do not overwrite any of the map or rp information
3052          * until we are sure we can commit to a new buffer.
3053          *
3054          * Callers depend upon this behavior and assume that
3055          * we leave everything unchanged if we fail.
3056          */
3057         skb = dev_alloc_skb(skb_size);
3058         if (skb == NULL)
3059                 return -ENOMEM;
3060
3061         skb->dev = tp->dev;
3062         skb_reserve(skb, tp->rx_offset);
3063
3064         mapping = pci_map_single(tp->pdev, skb->data,
3065                                  skb_size - tp->rx_offset,
3066                                  PCI_DMA_FROMDEVICE);
3067
3068         map->skb = skb;
3069         pci_unmap_addr_set(map, mapping, mapping);
3070
3071         if (src_map != NULL)
3072                 src_map->skb = NULL;
3073
3074         desc->addr_hi = ((u64)mapping >> 32);
3075         desc->addr_lo = ((u64)mapping & 0xffffffff);
3076
3077         return skb_size;
3078 }
3079
3080 /* We only need to move over in the address because the other
3081  * members of the RX descriptor are invariant.  See notes above
3082  * tg3_alloc_rx_skb for full details.
3083  */
3084 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3085                            int src_idx, u32 dest_idx_unmasked)
3086 {
3087         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3088         struct ring_info *src_map, *dest_map;
3089         int dest_idx;
3090
3091         switch (opaque_key) {
3092         case RXD_OPAQUE_RING_STD:
3093                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3094                 dest_desc = &tp->rx_std[dest_idx];
3095                 dest_map = &tp->rx_std_buffers[dest_idx];
3096                 src_desc = &tp->rx_std[src_idx];
3097                 src_map = &tp->rx_std_buffers[src_idx];
3098                 break;
3099
3100         case RXD_OPAQUE_RING_JUMBO:
3101                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3102                 dest_desc = &tp->rx_jumbo[dest_idx];
3103                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3104                 src_desc = &tp->rx_jumbo[src_idx];
3105                 src_map = &tp->rx_jumbo_buffers[src_idx];
3106                 break;
3107
3108         default:
3109                 return;
3110         };
3111
3112         dest_map->skb = src_map->skb;
3113         pci_unmap_addr_set(dest_map, mapping,
3114                            pci_unmap_addr(src_map, mapping));
3115         dest_desc->addr_hi = src_desc->addr_hi;
3116         dest_desc->addr_lo = src_desc->addr_lo;
3117
3118         src_map->skb = NULL;
3119 }
3120
3121 #if TG3_VLAN_TAG_USED
3122 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3123 {
3124         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3125 }
3126 #endif
3127
3128 /* The RX ring scheme is composed of multiple rings which post fresh
3129  * buffers to the chip, and one special ring the chip uses to report
3130  * status back to the host.
3131  *
3132  * The special ring reports the status of received packets to the
3133  * host.  The chip does not write into the original descriptor the
3134  * RX buffer was obtained from.  The chip simply takes the original
3135  * descriptor as provided by the host, updates the status and length
3136  * field, then writes this into the next status ring entry.
3137  *
3138  * Each ring the host uses to post buffers to the chip is described
3139  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3140  * it is first placed into the on-chip ram.  When the packet's length
3141  * is known, it walks down the TG3_BDINFO entries to select the ring.
3142  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3143  * which is within the range of the new packet's length is chosen.
3144  *
3145  * The "separate ring for rx status" scheme may sound queer, but it makes
3146  * sense from a cache coherency perspective.  If only the host writes
3147  * to the buffer post rings, and only the chip writes to the rx status
3148  * rings, then cache lines never move beyond shared-modified state.
3149  * If both the host and chip were to write into the same ring, cache line
3150  * eviction could occur since both entities want it in an exclusive state.
3151  */
3152 static int tg3_rx(struct tg3 *tp, int budget)
3153 {
3154         u32 work_mask;
3155         u32 sw_idx = tp->rx_rcb_ptr;
3156         u16 hw_idx;
3157         int received;
3158
3159         hw_idx = tp->hw_status->idx[0].rx_producer;
3160         /*
3161          * We need to order the read of hw_idx and the read of
3162          * the opaque cookie.
3163          */
3164         rmb();
3165         work_mask = 0;
3166         received = 0;
3167         while (sw_idx != hw_idx && budget > 0) {
3168                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3169                 unsigned int len;
3170                 struct sk_buff *skb;
3171                 dma_addr_t dma_addr;
3172                 u32 opaque_key, desc_idx, *post_ptr;
3173
3174                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3175                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3176                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3177                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3178                                                   mapping);
3179                         skb = tp->rx_std_buffers[desc_idx].skb;
3180                         post_ptr = &tp->rx_std_ptr;
3181                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3182                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3183                                                   mapping);
3184                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3185                         post_ptr = &tp->rx_jumbo_ptr;
3186                 }
3187                 else {
3188                         goto next_pkt_nopost;
3189                 }
3190
3191                 work_mask |= opaque_key;
3192
3193                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3194                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3195                 drop_it:
3196                         tg3_recycle_rx(tp, opaque_key,
3197                                        desc_idx, *post_ptr);
3198                 drop_it_no_recycle:
3199                         /* Other statistics kept track of by card. */
3200                         tp->net_stats.rx_dropped++;
3201                         goto next_pkt;
3202                 }
3203
3204                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3205
3206                 if (len > RX_COPY_THRESHOLD 
3207                         && tp->rx_offset == 2
3208                         /* rx_offset != 2 iff this is a 5701 card running
3209                          * in PCI-X mode [see tg3_get_invariants()] */
3210                 ) {
3211                         int skb_size;
3212
3213                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3214                                                     desc_idx, *post_ptr);
3215                         if (skb_size < 0)
3216                                 goto drop_it;
3217
3218                         pci_unmap_single(tp->pdev, dma_addr,
3219                                          skb_size - tp->rx_offset,
3220                                          PCI_DMA_FROMDEVICE);
3221
3222                         skb_put(skb, len);
3223                 } else {
3224                         struct sk_buff *copy_skb;
3225
3226                         tg3_recycle_rx(tp, opaque_key,
3227                                        desc_idx, *post_ptr);
3228
3229                         copy_skb = dev_alloc_skb(len + 2);
3230                         if (copy_skb == NULL)
3231                                 goto drop_it_no_recycle;
3232
3233                         copy_skb->dev = tp->dev;
3234                         skb_reserve(copy_skb, 2);
3235                         skb_put(copy_skb, len);
3236                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3237                         memcpy(copy_skb->data, skb->data, len);
3238                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3239
3240                         /* We'll reuse the original ring buffer. */
3241                         skb = copy_skb;
3242                 }
3243
3244                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3245                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3246                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3247                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3248                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3249                 else
3250                         skb->ip_summed = CHECKSUM_NONE;
3251
3252                 skb->protocol = eth_type_trans(skb, tp->dev);
3253 #if TG3_VLAN_TAG_USED
3254                 if (tp->vlgrp != NULL &&
3255                     desc->type_flags & RXD_FLAG_VLAN) {
3256                         tg3_vlan_rx(tp, skb,
3257                                     desc->err_vlan & RXD_VLAN_MASK);
3258                 } else
3259 #endif
3260                         netif_receive_skb(skb);
3261
3262                 tp->dev->last_rx = jiffies;
3263                 received++;
3264                 budget--;
3265
3266 next_pkt:
3267                 (*post_ptr)++;
3268 next_pkt_nopost:
3269                 sw_idx++;
3270                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3271
3272                 /* Refresh hw_idx to see if there is new work */
3273                 if (sw_idx == hw_idx) {
3274                         hw_idx = tp->hw_status->idx[0].rx_producer;
3275                         rmb();
3276                 }
3277         }
3278
3279         /* ACK the status ring. */
3280         tp->rx_rcb_ptr = sw_idx;
3281         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3282
3283         /* Refill RX ring(s). */
3284         if (work_mask & RXD_OPAQUE_RING_STD) {
3285                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3286                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3287                              sw_idx);
3288         }
3289         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3290                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3291                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3292                              sw_idx);
3293         }
3294         mmiowb();
3295
3296         return received;
3297 }
3298
3299 static int tg3_poll(struct net_device *netdev, int *budget)
3300 {
3301         struct tg3 *tp = netdev_priv(netdev);
3302         struct tg3_hw_status *sblk = tp->hw_status;
3303         int done;
3304
3305         /* handle link change and other phy events */
3306         if (!(tp->tg3_flags &
3307               (TG3_FLAG_USE_LINKCHG_REG |
3308                TG3_FLAG_POLL_SERDES))) {
3309                 if (sblk->status & SD_STATUS_LINK_CHG) {
3310                         sblk->status = SD_STATUS_UPDATED |
3311                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3312                         spin_lock(&tp->lock);
3313                         tg3_setup_phy(tp, 0);
3314                         spin_unlock(&tp->lock);
3315                 }
3316         }
3317
3318         /* run TX completion thread */
3319         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3320                 tg3_tx(tp);
3321         }
3322
3323         /* run RX thread, within the bounds set by NAPI.
3324          * All RX "locking" is done by ensuring outside
3325          * code synchronizes with dev->poll()
3326          */
3327         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3328                 int orig_budget = *budget;
3329                 int work_done;
3330
3331                 if (orig_budget > netdev->quota)
3332                         orig_budget = netdev->quota;
3333
3334                 work_done = tg3_rx(tp, orig_budget);
3335
3336                 *budget -= work_done;
3337                 netdev->quota -= work_done;
3338         }
3339
3340         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3341                 tp->last_tag = sblk->status_tag;
3342                 rmb();
3343         } else
3344                 sblk->status &= ~SD_STATUS_UPDATED;
3345
3346         /* if no more work, tell net stack and NIC we're done */
3347         done = !tg3_has_work(tp);
3348         if (done) {
3349                 netif_rx_complete(netdev);
3350                 tg3_restart_ints(tp);
3351         }
3352
3353         return (done ? 0 : 1);
3354 }
3355
3356 static void tg3_irq_quiesce(struct tg3 *tp)
3357 {
3358         BUG_ON(tp->irq_sync);
3359
3360         tp->irq_sync = 1;
3361         smp_mb();
3362
3363         synchronize_irq(tp->pdev->irq);
3364 }
3365
3366 static inline int tg3_irq_sync(struct tg3 *tp)
3367 {
3368         return tp->irq_sync;
3369 }
3370
3371 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3372  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3373  * with as well.  Most of the time, this is not necessary except when
3374  * shutting down the device.
3375  */
3376 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3377 {
3378         if (irq_sync)
3379                 tg3_irq_quiesce(tp);
3380         spin_lock_bh(&tp->lock);
3381         spin_lock(&tp->tx_lock);
3382 }
3383
3384 static inline void tg3_full_unlock(struct tg3 *tp)
3385 {
3386         spin_unlock(&tp->tx_lock);
3387         spin_unlock_bh(&tp->lock);
3388 }
3389
3390 /* One-shot MSI handler - Chip automatically disables interrupt
3391  * after sending MSI so driver doesn't have to do it.
3392  */
3393 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3394 {
3395         struct net_device *dev = dev_id;
3396         struct tg3 *tp = netdev_priv(dev);
3397
3398         prefetch(tp->hw_status);
3399         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3400
3401         if (likely(!tg3_irq_sync(tp)))
3402                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3403
3404         return IRQ_HANDLED;
3405 }
3406
3407 /* MSI ISR - No need to check for interrupt sharing and no need to
3408  * flush status block and interrupt mailbox. PCI ordering rules
3409  * guarantee that MSI will arrive after the status block.
3410  */
3411 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3412 {
3413         struct net_device *dev = dev_id;
3414         struct tg3 *tp = netdev_priv(dev);
3415
3416         prefetch(tp->hw_status);
3417         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3418         /*
3419          * Writing any value to intr-mbox-0 clears PCI INTA# and
3420          * chip-internal interrupt pending events.
3421          * Writing non-zero to intr-mbox-0 additional tells the
3422          * NIC to stop sending us irqs, engaging "in-intr-handler"
3423          * event coalescing.
3424          */
3425         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3426         if (likely(!tg3_irq_sync(tp)))
3427                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3428
3429         return IRQ_RETVAL(1);
3430 }
3431
3432 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3433 {
3434         struct net_device *dev = dev_id;
3435         struct tg3 *tp = netdev_priv(dev);
3436         struct tg3_hw_status *sblk = tp->hw_status;
3437         unsigned int handled = 1;
3438
3439         /* In INTx mode, it is possible for the interrupt to arrive at
3440          * the CPU before the status block posted prior to the interrupt.
3441          * Reading the PCI State register will confirm whether the
3442          * interrupt is ours and will flush the status block.
3443          */
3444         if ((sblk->status & SD_STATUS_UPDATED) ||
3445             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3446                 /*
3447                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3448                  * chip-internal interrupt pending events.
3449                  * Writing non-zero to intr-mbox-0 additional tells the
3450                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3451                  * event coalescing.
3452                  */
3453                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3454                              0x00000001);
3455                 if (tg3_irq_sync(tp))
3456                         goto out;
3457                 sblk->status &= ~SD_STATUS_UPDATED;
3458                 if (likely(tg3_has_work(tp))) {
3459                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3460                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3461                 } else {
3462                         /* No work, shared interrupt perhaps?  re-enable
3463                          * interrupts, and flush that PCI write
3464                          */
3465                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3466                                 0x00000000);
3467                 }
3468         } else {        /* shared interrupt */
3469                 handled = 0;
3470         }
3471 out:
3472         return IRQ_RETVAL(handled);
3473 }
3474
3475 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3476 {
3477         struct net_device *dev = dev_id;
3478         struct tg3 *tp = netdev_priv(dev);
3479         struct tg3_hw_status *sblk = tp->hw_status;
3480         unsigned int handled = 1;
3481
3482         /* In INTx mode, it is possible for the interrupt to arrive at
3483          * the CPU before the status block posted prior to the interrupt.
3484          * Reading the PCI State register will confirm whether the
3485          * interrupt is ours and will flush the status block.
3486          */
3487         if ((sblk->status_tag != tp->last_tag) ||
3488             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3489                 /*
3490                  * writing any value to intr-mbox-0 clears PCI INTA# and
3491                  * chip-internal interrupt pending events.
3492                  * writing non-zero to intr-mbox-0 additional tells the
3493                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3494                  * event coalescing.
3495                  */
3496                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3497                              0x00000001);
3498                 if (tg3_irq_sync(tp))
3499                         goto out;
3500                 if (netif_rx_schedule_prep(dev)) {
3501                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3502                         /* Update last_tag to mark that this status has been
3503                          * seen. Because interrupt may be shared, we may be
3504                          * racing with tg3_poll(), so only update last_tag
3505                          * if tg3_poll() is not scheduled.
3506                          */
3507                         tp->last_tag = sblk->status_tag;
3508                         __netif_rx_schedule(dev);
3509                 }
3510         } else {        /* shared interrupt */
3511                 handled = 0;
3512         }
3513 out:
3514         return IRQ_RETVAL(handled);
3515 }
3516
3517 /* ISR for interrupt test */
3518 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3519                 struct pt_regs *regs)
3520 {
3521         struct net_device *dev = dev_id;
3522         struct tg3 *tp = netdev_priv(dev);
3523         struct tg3_hw_status *sblk = tp->hw_status;
3524
3525         if ((sblk->status & SD_STATUS_UPDATED) ||
3526             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3527                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3528                              0x00000001);
3529                 return IRQ_RETVAL(1);
3530         }
3531         return IRQ_RETVAL(0);
3532 }
3533
3534 static int tg3_init_hw(struct tg3 *);
3535 static int tg3_halt(struct tg3 *, int, int);
3536
3537 #ifdef CONFIG_NET_POLL_CONTROLLER
3538 static void tg3_poll_controller(struct net_device *dev)
3539 {
3540         struct tg3 *tp = netdev_priv(dev);
3541
3542         tg3_interrupt(tp->pdev->irq, dev, NULL);
3543 }
3544 #endif
3545
3546 static void tg3_reset_task(void *_data)
3547 {
3548         struct tg3 *tp = _data;
3549         unsigned int restart_timer;
3550
3551         tg3_full_lock(tp, 0);
3552         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3553
3554         if (!netif_running(tp->dev)) {
3555                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3556                 tg3_full_unlock(tp);
3557                 return;
3558         }
3559
3560         tg3_full_unlock(tp);
3561
3562         tg3_netif_stop(tp);
3563
3564         tg3_full_lock(tp, 1);
3565
3566         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3567         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3568
3569         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3570         tg3_init_hw(tp);
3571
3572         tg3_netif_start(tp);
3573
3574         if (restart_timer)
3575                 mod_timer(&tp->timer, jiffies + 1);
3576
3577         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3578
3579         tg3_full_unlock(tp);
3580 }
3581
3582 static void tg3_tx_timeout(struct net_device *dev)
3583 {
3584         struct tg3 *tp = netdev_priv(dev);
3585
3586         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3587                dev->name);
3588
3589         schedule_work(&tp->reset_task);
3590 }
3591
3592 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3593 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3594 {
3595         u32 base = (u32) mapping & 0xffffffff;
3596
3597         return ((base > 0xffffdcc0) &&
3598                 (base + len + 8 < base));
3599 }
3600
3601 /* Test for DMA addresses > 40-bit */
3602 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3603                                           int len)
3604 {
3605 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3606         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3607                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3608         return 0;
3609 #else
3610         return 0;
3611 #endif
3612 }
3613
3614 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3615
3616 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3617 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3618                                        u32 last_plus_one, u32 *start,
3619                                        u32 base_flags, u32 mss)
3620 {
3621         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3622         dma_addr_t new_addr = 0;
3623         u32 entry = *start;
3624         int i, ret = 0;
3625
3626         if (!new_skb) {
3627                 ret = -1;
3628         } else {
3629                 /* New SKB is guaranteed to be linear. */
3630                 entry = *start;
3631                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3632                                           PCI_DMA_TODEVICE);
3633                 /* Make sure new skb does not cross any 4G boundaries.
3634                  * Drop the packet if it does.
3635                  */
3636                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3637                         ret = -1;
3638                         dev_kfree_skb(new_skb);
3639                         new_skb = NULL;
3640                 } else {
3641                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3642                                     base_flags, 1 | (mss << 1));
3643                         *start = NEXT_TX(entry);
3644                 }
3645         }
3646
3647         /* Now clean up the sw ring entries. */
3648         i = 0;
3649         while (entry != last_plus_one) {
3650                 int len;
3651
3652                 if (i == 0)
3653                         len = skb_headlen(skb);
3654                 else
3655                         len = skb_shinfo(skb)->frags[i-1].size;
3656                 pci_unmap_single(tp->pdev,
3657                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3658                                  len, PCI_DMA_TODEVICE);
3659                 if (i == 0) {
3660                         tp->tx_buffers[entry].skb = new_skb;
3661                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3662                 } else {
3663                         tp->tx_buffers[entry].skb = NULL;
3664                 }
3665                 entry = NEXT_TX(entry);
3666                 i++;
3667         }
3668
3669         dev_kfree_skb(skb);
3670
3671         return ret;
3672 }
3673
3674 static void tg3_set_txd(struct tg3 *tp, int entry,
3675                         dma_addr_t mapping, int len, u32 flags,
3676                         u32 mss_and_is_end)
3677 {
3678         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3679         int is_end = (mss_and_is_end & 0x1);
3680         u32 mss = (mss_and_is_end >> 1);
3681         u32 vlan_tag = 0;
3682
3683         if (is_end)
3684                 flags |= TXD_FLAG_END;
3685         if (flags & TXD_FLAG_VLAN) {
3686                 vlan_tag = flags >> 16;
3687                 flags &= 0xffff;
3688         }
3689         vlan_tag |= (mss << TXD_MSS_SHIFT);
3690
3691         txd->addr_hi = ((u64) mapping >> 32);
3692         txd->addr_lo = ((u64) mapping & 0xffffffff);
3693         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3694         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3695 }
3696
3697 /* hard_start_xmit for devices that don't have any bugs and
3698  * support TG3_FLG2_HW_TSO_2 only.
3699  */
3700 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3701 {
3702         struct tg3 *tp = netdev_priv(dev);
3703         dma_addr_t mapping;
3704         u32 len, entry, base_flags, mss;
3705
3706         len = skb_headlen(skb);
3707
3708         /* No BH disabling for tx_lock here.  We are running in BH disabled
3709          * context and TX reclaim runs via tp->poll inside of a software
3710          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3711          * no IRQ context deadlocks to worry about either.  Rejoice!
3712          */
3713         if (!spin_trylock(&tp->tx_lock))
3714                 return NETDEV_TX_LOCKED;
3715
3716         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3717                 if (!netif_queue_stopped(dev)) {
3718                         netif_stop_queue(dev);
3719
3720                         /* This is a hard error, log it. */
3721                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3722                                "queue awake!\n", dev->name);
3723                 }
3724                 spin_unlock(&tp->tx_lock);
3725                 return NETDEV_TX_BUSY;
3726         }
3727
3728         entry = tp->tx_prod;
3729         base_flags = 0;
3730 #if TG3_TSO_SUPPORT != 0
3731         mss = 0;
3732         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3733             (mss = skb_shinfo(skb)->tso_size) != 0) {
3734                 int tcp_opt_len, ip_tcp_len;
3735
3736                 if (skb_header_cloned(skb) &&
3737                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3738                         dev_kfree_skb(skb);
3739                         goto out_unlock;
3740                 }
3741
3742                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3743                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3744
3745                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3746                                TXD_FLAG_CPU_POST_DMA);
3747
3748                 skb->nh.iph->check = 0;
3749                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3750
3751                 skb->h.th->check = 0;
3752
3753                 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3754         }
3755         else if (skb->ip_summed == CHECKSUM_HW)
3756                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3757 #else
3758         mss = 0;
3759         if (skb->ip_summed == CHECKSUM_HW)
3760                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3761 #endif
3762 #if TG3_VLAN_TAG_USED
3763         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3764                 base_flags |= (TXD_FLAG_VLAN |
3765                                (vlan_tx_tag_get(skb) << 16));
3766 #endif
3767
3768         /* Queue skb data, a.k.a. the main skb fragment. */
3769         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3770
3771         tp->tx_buffers[entry].skb = skb;
3772         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3773
3774         tg3_set_txd(tp, entry, mapping, len, base_flags,
3775                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3776
3777         entry = NEXT_TX(entry);
3778
3779         /* Now loop through additional data fragments, and queue them. */
3780         if (skb_shinfo(skb)->nr_frags > 0) {
3781                 unsigned int i, last;
3782
3783                 last = skb_shinfo(skb)->nr_frags - 1;
3784                 for (i = 0; i <= last; i++) {
3785                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3786
3787                         len = frag->size;
3788                         mapping = pci_map_page(tp->pdev,
3789                                                frag->page,
3790                                                frag->page_offset,
3791                                                len, PCI_DMA_TODEVICE);
3792
3793                         tp->tx_buffers[entry].skb = NULL;
3794                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3795
3796                         tg3_set_txd(tp, entry, mapping, len,
3797                                     base_flags, (i == last) | (mss << 1));
3798
3799                         entry = NEXT_TX(entry);
3800                 }
3801         }
3802
3803         /* Packets are ready, update Tx producer idx local and on card. */
3804         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3805
3806         tp->tx_prod = entry;
3807         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3808                 netif_stop_queue(dev);
3809                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3810                         netif_wake_queue(tp->dev);
3811         }
3812
3813 out_unlock:
3814         mmiowb();
3815         spin_unlock(&tp->tx_lock);
3816
3817         dev->trans_start = jiffies;
3818
3819         return NETDEV_TX_OK;
3820 }
3821
3822 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3823  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3824  */
3825 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3826 {
3827         struct tg3 *tp = netdev_priv(dev);
3828         dma_addr_t mapping;
3829         u32 len, entry, base_flags, mss;
3830         int would_hit_hwbug;
3831
3832         len = skb_headlen(skb);
3833
3834         /* No BH disabling for tx_lock here.  We are running in BH disabled
3835          * context and TX reclaim runs via tp->poll inside of a software
3836          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3837          * no IRQ context deadlocks to worry about either.  Rejoice!
3838          */
3839         if (!spin_trylock(&tp->tx_lock))
3840                 return NETDEV_TX_LOCKED; 
3841
3842         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3843                 if (!netif_queue_stopped(dev)) {
3844                         netif_stop_queue(dev);
3845
3846                         /* This is a hard error, log it. */
3847                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3848                                "queue awake!\n", dev->name);
3849                 }
3850                 spin_unlock(&tp->tx_lock);
3851                 return NETDEV_TX_BUSY;
3852         }
3853
3854         entry = tp->tx_prod;
3855         base_flags = 0;
3856         if (skb->ip_summed == CHECKSUM_HW)
3857                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3858 #if TG3_TSO_SUPPORT != 0
3859         mss = 0;
3860         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3861             (mss = skb_shinfo(skb)->tso_size) != 0) {
3862                 int tcp_opt_len, ip_tcp_len;
3863
3864                 if (skb_header_cloned(skb) &&
3865                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3866                         dev_kfree_skb(skb);
3867                         goto out_unlock;
3868                 }
3869
3870                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3871                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3872
3873                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3874                                TXD_FLAG_CPU_POST_DMA);
3875
3876                 skb->nh.iph->check = 0;
3877                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3878                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3879                         skb->h.th->check = 0;
3880                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3881                 }
3882                 else {
3883                         skb->h.th->check =
3884                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3885                                                    skb->nh.iph->daddr,
3886                                                    0, IPPROTO_TCP, 0);
3887                 }
3888
3889                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3890                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3891                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3892                                 int tsflags;
3893
3894                                 tsflags = ((skb->nh.iph->ihl - 5) +
3895                                            (tcp_opt_len >> 2));
3896                                 mss |= (tsflags << 11);
3897                         }
3898                 } else {
3899                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3900                                 int tsflags;
3901
3902                                 tsflags = ((skb->nh.iph->ihl - 5) +
3903                                            (tcp_opt_len >> 2));
3904                                 base_flags |= tsflags << 12;
3905                         }
3906                 }
3907         }
3908 #else
3909         mss = 0;
3910 #endif
3911 #if TG3_VLAN_TAG_USED
3912         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3913                 base_flags |= (TXD_FLAG_VLAN |
3914                                (vlan_tx_tag_get(skb) << 16));
3915 #endif
3916
3917         /* Queue skb data, a.k.a. the main skb fragment. */
3918         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3919
3920         tp->tx_buffers[entry].skb = skb;
3921         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3922
3923         would_hit_hwbug = 0;
3924
3925         if (tg3_4g_overflow_test(mapping, len))
3926                 would_hit_hwbug = 1;
3927
3928         tg3_set_txd(tp, entry, mapping, len, base_flags,
3929                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3930
3931         entry = NEXT_TX(entry);
3932
3933         /* Now loop through additional data fragments, and queue them. */
3934         if (skb_shinfo(skb)->nr_frags > 0) {
3935                 unsigned int i, last;
3936
3937                 last = skb_shinfo(skb)->nr_frags - 1;
3938                 for (i = 0; i <= last; i++) {
3939                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3940
3941                         len = frag->size;
3942                         mapping = pci_map_page(tp->pdev,
3943                                                frag->page,
3944                                                frag->page_offset,
3945                                                len, PCI_DMA_TODEVICE);
3946
3947                         tp->tx_buffers[entry].skb = NULL;
3948                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3949
3950                         if (tg3_4g_overflow_test(mapping, len))
3951                                 would_hit_hwbug = 1;
3952
3953                         if (tg3_40bit_overflow_test(tp, mapping, len))
3954                                 would_hit_hwbug = 1;
3955
3956                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3957                                 tg3_set_txd(tp, entry, mapping, len,
3958                                             base_flags, (i == last)|(mss << 1));
3959                         else
3960                                 tg3_set_txd(tp, entry, mapping, len,
3961                                             base_flags, (i == last));
3962
3963                         entry = NEXT_TX(entry);
3964                 }
3965         }
3966
3967         if (would_hit_hwbug) {
3968                 u32 last_plus_one = entry;
3969                 u32 start;
3970
3971                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3972                 start &= (TG3_TX_RING_SIZE - 1);
3973
3974                 /* If the workaround fails due to memory/mapping
3975                  * failure, silently drop this packet.
3976                  */
3977                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3978                                                 &start, base_flags, mss))
3979                         goto out_unlock;
3980
3981                 entry = start;
3982         }
3983
3984         /* Packets are ready, update Tx producer idx local and on card. */
3985         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3986
3987         tp->tx_prod = entry;
3988         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3989                 netif_stop_queue(dev);
3990                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3991                         netif_wake_queue(tp->dev);
3992         }
3993
3994 out_unlock:
3995         mmiowb();
3996         spin_unlock(&tp->tx_lock);
3997
3998         dev->trans_start = jiffies;
3999
4000         return NETDEV_TX_OK;
4001 }
4002
4003 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4004                                int new_mtu)
4005 {
4006         dev->mtu = new_mtu;
4007
4008         if (new_mtu > ETH_DATA_LEN) {
4009                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4010                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4011                         ethtool_op_set_tso(dev, 0);
4012                 }
4013                 else
4014                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4015         } else {
4016                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4017                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4018                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4019         }
4020 }
4021
4022 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4023 {
4024         struct tg3 *tp = netdev_priv(dev);
4025
4026         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4027                 return -EINVAL;
4028
4029         if (!netif_running(dev)) {
4030                 /* We'll just catch it later when the
4031                  * device is up'd.
4032                  */
4033                 tg3_set_mtu(dev, tp, new_mtu);
4034                 return 0;
4035         }
4036
4037         tg3_netif_stop(tp);
4038
4039         tg3_full_lock(tp, 1);
4040
4041         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4042
4043         tg3_set_mtu(dev, tp, new_mtu);
4044
4045         tg3_init_hw(tp);
4046
4047         tg3_netif_start(tp);
4048
4049         tg3_full_unlock(tp);
4050
4051         return 0;
4052 }
4053
4054 /* Free up pending packets in all rx/tx rings.
4055  *
4056  * The chip has been shut down and the driver detached from
4057  * the networking, so no interrupts or new tx packets will
4058  * end up in the driver.  tp->{tx,}lock is not held and we are not
4059  * in an interrupt context and thus may sleep.
4060  */
4061 static void tg3_free_rings(struct tg3 *tp)
4062 {
4063         struct ring_info *rxp;
4064         int i;
4065
4066         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4067                 rxp = &tp->rx_std_buffers[i];
4068
4069                 if (rxp->skb == NULL)
4070                         continue;
4071                 pci_unmap_single(tp->pdev,
4072                                  pci_unmap_addr(rxp, mapping),
4073                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4074                                  PCI_DMA_FROMDEVICE);
4075                 dev_kfree_skb_any(rxp->skb);
4076                 rxp->skb = NULL;
4077         }
4078
4079         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4080                 rxp = &tp->rx_jumbo_buffers[i];
4081
4082                 if (rxp->skb == NULL)
4083                         continue;
4084                 pci_unmap_single(tp->pdev,
4085                                  pci_unmap_addr(rxp, mapping),
4086                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4087                                  PCI_DMA_FROMDEVICE);
4088                 dev_kfree_skb_any(rxp->skb);
4089                 rxp->skb = NULL;
4090         }
4091
4092         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4093                 struct tx_ring_info *txp;
4094                 struct sk_buff *skb;
4095                 int j;
4096
4097                 txp = &tp->tx_buffers[i];
4098                 skb = txp->skb;
4099
4100                 if (skb == NULL) {
4101                         i++;
4102                         continue;
4103                 }
4104
4105                 pci_unmap_single(tp->pdev,
4106                                  pci_unmap_addr(txp, mapping),
4107                                  skb_headlen(skb),
4108                                  PCI_DMA_TODEVICE);
4109                 txp->skb = NULL;
4110
4111                 i++;
4112
4113                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4114                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4115                         pci_unmap_page(tp->pdev,
4116                                        pci_unmap_addr(txp, mapping),
4117                                        skb_shinfo(skb)->frags[j].size,
4118                                        PCI_DMA_TODEVICE);
4119                         i++;
4120                 }
4121
4122                 dev_kfree_skb_any(skb);
4123         }
4124 }
4125
4126 /* Initialize tx/rx rings for packet processing.
4127  *
4128  * The chip has been shut down and the driver detached from
4129  * the networking, so no interrupts or new tx packets will
4130  * end up in the driver.  tp->{tx,}lock are held and thus
4131  * we may not sleep.
4132  */
4133 static void tg3_init_rings(struct tg3 *tp)
4134 {
4135         u32 i;
4136
4137         /* Free up all the SKBs. */
4138         tg3_free_rings(tp);
4139
4140         /* Zero out all descriptors. */
4141         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4142         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4143         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4144         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4145
4146         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4147         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4148             (tp->dev->mtu > ETH_DATA_LEN))
4149                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4150
4151         /* Initialize invariants of the rings, we only set this
4152          * stuff once.  This works because the card does not
4153          * write into the rx buffer posting rings.
4154          */
4155         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4156                 struct tg3_rx_buffer_desc *rxd;
4157
4158                 rxd = &tp->rx_std[i];
4159                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4160                         << RXD_LEN_SHIFT;
4161                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4162                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4163                                (i << RXD_OPAQUE_INDEX_SHIFT));
4164         }
4165
4166         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4167                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4168                         struct tg3_rx_buffer_desc *rxd;
4169
4170                         rxd = &tp->rx_jumbo[i];
4171                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4172                                 << RXD_LEN_SHIFT;
4173                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4174                                 RXD_FLAG_JUMBO;
4175                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4176                                (i << RXD_OPAQUE_INDEX_SHIFT));
4177                 }
4178         }
4179
4180         /* Now allocate fresh SKBs for each rx ring. */
4181         for (i = 0; i < tp->rx_pending; i++) {
4182                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4183                                      -1, i) < 0)
4184                         break;
4185         }
4186
4187         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4188                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4189                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4190                                              -1, i) < 0)
4191                                 break;
4192                 }
4193         }
4194 }
4195
4196 /*
4197  * Must not be invoked with interrupt sources disabled and
4198  * the hardware shutdown down.
4199  */
4200 static void tg3_free_consistent(struct tg3 *tp)
4201 {
4202         kfree(tp->rx_std_buffers);
4203         tp->rx_std_buffers = NULL;
4204         if (tp->rx_std) {
4205                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4206                                     tp->rx_std, tp->rx_std_mapping);
4207                 tp->rx_std = NULL;
4208         }
4209         if (tp->rx_jumbo) {
4210                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4211                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4212                 tp->rx_jumbo = NULL;
4213         }
4214         if (tp->rx_rcb) {
4215                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4216                                     tp->rx_rcb, tp->rx_rcb_mapping);
4217                 tp->rx_rcb = NULL;
4218         }
4219         if (tp->tx_ring) {
4220                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4221                         tp->tx_ring, tp->tx_desc_mapping);
4222                 tp->tx_ring = NULL;
4223         }
4224         if (tp->hw_status) {
4225                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4226                                     tp->hw_status, tp->status_mapping);
4227                 tp->hw_status = NULL;
4228         }
4229         if (tp->hw_stats) {
4230                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4231                                     tp->hw_stats, tp->stats_mapping);
4232                 tp->hw_stats = NULL;
4233         }
4234 }
4235
4236 /*
4237  * Must not be invoked with interrupt sources disabled and
4238  * the hardware shutdown down.  Can sleep.
4239  */
4240 static int tg3_alloc_consistent(struct tg3 *tp)
4241 {
4242         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4243                                       (TG3_RX_RING_SIZE +
4244                                        TG3_RX_JUMBO_RING_SIZE)) +
4245                                      (sizeof(struct tx_ring_info) *
4246                                       TG3_TX_RING_SIZE),
4247                                      GFP_KERNEL);
4248         if (!tp->rx_std_buffers)
4249                 return -ENOMEM;
4250
4251         memset(tp->rx_std_buffers, 0,
4252                (sizeof(struct ring_info) *
4253                 (TG3_RX_RING_SIZE +
4254                  TG3_RX_JUMBO_RING_SIZE)) +
4255                (sizeof(struct tx_ring_info) *
4256                 TG3_TX_RING_SIZE));
4257
4258         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4259         tp->tx_buffers = (struct tx_ring_info *)
4260                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4261
4262         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4263                                           &tp->rx_std_mapping);
4264         if (!tp->rx_std)
4265                 goto err_out;
4266
4267         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4268                                             &tp->rx_jumbo_mapping);
4269
4270         if (!tp->rx_jumbo)
4271                 goto err_out;
4272
4273         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4274                                           &tp->rx_rcb_mapping);
4275         if (!tp->rx_rcb)
4276                 goto err_out;
4277
4278         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4279                                            &tp->tx_desc_mapping);
4280         if (!tp->tx_ring)
4281                 goto err_out;
4282
4283         tp->hw_status = pci_alloc_consistent(tp->pdev,
4284                                              TG3_HW_STATUS_SIZE,
4285                                              &tp->status_mapping);
4286         if (!tp->hw_status)
4287                 goto err_out;
4288
4289         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4290                                             sizeof(struct tg3_hw_stats),
4291                                             &tp->stats_mapping);
4292         if (!tp->hw_stats)
4293                 goto err_out;
4294
4295         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4296         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4297
4298         return 0;
4299
4300 err_out:
4301         tg3_free_consistent(tp);
4302         return -ENOMEM;
4303 }
4304
4305 #define MAX_WAIT_CNT 1000
4306
4307 /* To stop a block, clear the enable bit and poll till it
4308  * clears.  tp->lock is held.
4309  */
4310 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4311 {
4312         unsigned int i;
4313         u32 val;
4314
4315         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4316                 switch (ofs) {
4317                 case RCVLSC_MODE:
4318                 case DMAC_MODE:
4319                 case MBFREE_MODE:
4320                 case BUFMGR_MODE:
4321                 case MEMARB_MODE:
4322                         /* We can't enable/disable these bits of the
4323                          * 5705/5750, just say success.
4324                          */
4325                         return 0;
4326
4327                 default:
4328                         break;
4329                 };
4330         }
4331
4332         val = tr32(ofs);
4333         val &= ~enable_bit;
4334         tw32_f(ofs, val);
4335
4336         for (i = 0; i < MAX_WAIT_CNT; i++) {
4337                 udelay(100);
4338                 val = tr32(ofs);
4339                 if ((val & enable_bit) == 0)
4340                         break;
4341         }
4342
4343         if (i == MAX_WAIT_CNT && !silent) {
4344                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4345                        "ofs=%lx enable_bit=%x\n",
4346                        ofs, enable_bit);
4347                 return -ENODEV;
4348         }
4349
4350         return 0;
4351 }
4352
4353 /* tp->lock is held. */
4354 static int tg3_abort_hw(struct tg3 *tp, int silent)
4355 {
4356         int i, err;
4357
4358         tg3_disable_ints(tp);
4359
4360         tp->rx_mode &= ~RX_MODE_ENABLE;
4361         tw32_f(MAC_RX_MODE, tp->rx_mode);
4362         udelay(10);
4363
4364         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4365         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4366         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4367         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4368         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4369         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4370
4371         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4372         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4373         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4374         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4375         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4376         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4377         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4378
4379         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4380         tw32_f(MAC_MODE, tp->mac_mode);
4381         udelay(40);
4382
4383         tp->tx_mode &= ~TX_MODE_ENABLE;
4384         tw32_f(MAC_TX_MODE, tp->tx_mode);
4385
4386         for (i = 0; i < MAX_WAIT_CNT; i++) {
4387                 udelay(100);
4388                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4389                         break;
4390         }
4391         if (i >= MAX_WAIT_CNT) {
4392                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4393                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4394                        tp->dev->name, tr32(MAC_TX_MODE));
4395                 err |= -ENODEV;
4396         }
4397
4398         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4399         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4400         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4401
4402         tw32(FTQ_RESET, 0xffffffff);
4403         tw32(FTQ_RESET, 0x00000000);
4404
4405         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4406         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4407
4408         if (tp->hw_status)
4409                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4410         if (tp->hw_stats)
4411                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4412
4413         return err;
4414 }
4415
4416 /* tp->lock is held. */
4417 static int tg3_nvram_lock(struct tg3 *tp)
4418 {
4419         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4420                 int i;
4421
4422                 if (tp->nvram_lock_cnt == 0) {
4423                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4424                         for (i = 0; i < 8000; i++) {
4425                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4426                                         break;
4427                                 udelay(20);
4428                         }
4429                         if (i == 8000) {
4430                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4431                                 return -ENODEV;
4432                         }
4433                 }
4434                 tp->nvram_lock_cnt++;
4435         }
4436         return 0;
4437 }
4438
4439 /* tp->lock is held. */
4440 static void tg3_nvram_unlock(struct tg3 *tp)
4441 {
4442         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4443                 if (tp->nvram_lock_cnt > 0)
4444                         tp->nvram_lock_cnt--;
4445                 if (tp->nvram_lock_cnt == 0)
4446                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4447         }
4448 }
4449
4450 /* tp->lock is held. */
4451 static void tg3_enable_nvram_access(struct tg3 *tp)
4452 {
4453         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4454             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4455                 u32 nvaccess = tr32(NVRAM_ACCESS);
4456
4457                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4458         }
4459 }
4460
4461 /* tp->lock is held. */
4462 static void tg3_disable_nvram_access(struct tg3 *tp)
4463 {
4464         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4465             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4466                 u32 nvaccess = tr32(NVRAM_ACCESS);
4467
4468                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4469         }
4470 }
4471
4472 /* tp->lock is held. */
4473 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4474 {
4475         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4476                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4477                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4478
4479         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4480                 switch (kind) {
4481                 case RESET_KIND_INIT:
4482                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4483                                       DRV_STATE_START);
4484                         break;
4485
4486                 case RESET_KIND_SHUTDOWN:
4487                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4488                                       DRV_STATE_UNLOAD);
4489                         break;
4490
4491                 case RESET_KIND_SUSPEND:
4492                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4493                                       DRV_STATE_SUSPEND);
4494                         break;
4495
4496                 default:
4497                         break;
4498                 };
4499         }
4500 }
4501
4502 /* tp->lock is held. */
4503 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4504 {
4505         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4506                 switch (kind) {
4507                 case RESET_KIND_INIT:
4508                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4509                                       DRV_STATE_START_DONE);
4510                         break;
4511
4512                 case RESET_KIND_SHUTDOWN:
4513                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4514                                       DRV_STATE_UNLOAD_DONE);
4515                         break;
4516
4517                 default:
4518                         break;
4519                 };
4520         }
4521 }
4522
4523 /* tp->lock is held. */
4524 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4525 {
4526         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4527                 switch (kind) {
4528                 case RESET_KIND_INIT:
4529                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4530                                       DRV_STATE_START);
4531                         break;
4532
4533                 case RESET_KIND_SHUTDOWN:
4534                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4535                                       DRV_STATE_UNLOAD);
4536                         break;
4537
4538                 case RESET_KIND_SUSPEND:
4539                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4540                                       DRV_STATE_SUSPEND);
4541                         break;
4542
4543                 default:
4544                         break;
4545                 };
4546         }
4547 }
4548
4549 static void tg3_stop_fw(struct tg3 *);
4550
4551 /* tp->lock is held. */
4552 static int tg3_chip_reset(struct tg3 *tp)
4553 {
4554         u32 val;
4555         void (*write_op)(struct tg3 *, u32, u32);
4556         int i;
4557
4558         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4559                 tg3_nvram_lock(tp);
4560                 /* No matching tg3_nvram_unlock() after this because
4561                  * chip reset below will undo the nvram lock.
4562                  */
4563                 tp->nvram_lock_cnt = 0;
4564         }
4565
4566         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4567             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4568             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4569                 tw32(GRC_FASTBOOT_PC, 0);
4570
4571         /*
4572          * We must avoid the readl() that normally takes place.
4573          * It locks machines, causes machine checks, and other
4574          * fun things.  So, temporarily disable the 5701
4575          * hardware workaround, while we do the reset.
4576          */
4577         write_op = tp->write32;
4578         if (write_op == tg3_write_flush_reg32)
4579                 tp->write32 = tg3_write32;
4580
4581         /* do the reset */
4582         val = GRC_MISC_CFG_CORECLK_RESET;
4583
4584         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4585                 if (tr32(0x7e2c) == 0x60) {
4586                         tw32(0x7e2c, 0x20);
4587                 }
4588                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4589                         tw32(GRC_MISC_CFG, (1 << 29));
4590                         val |= (1 << 29);
4591                 }
4592         }
4593
4594         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4595                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4596         tw32(GRC_MISC_CFG, val);
4597
4598         /* restore 5701 hardware bug workaround write method */
4599         tp->write32 = write_op;
4600
4601         /* Unfortunately, we have to delay before the PCI read back.
4602          * Some 575X chips even will not respond to a PCI cfg access
4603          * when the reset command is given to the chip.
4604          *
4605          * How do these hardware designers expect things to work
4606          * properly if the PCI write is posted for a long period
4607          * of time?  It is always necessary to have some method by
4608          * which a register read back can occur to push the write
4609          * out which does the reset.
4610          *
4611          * For most tg3 variants the trick below was working.
4612          * Ho hum...
4613          */
4614         udelay(120);
4615
4616         /* Flush PCI posted writes.  The normal MMIO registers
4617          * are inaccessible at this time so this is the only
4618          * way to make this reliably (actually, this is no longer
4619          * the case, see above).  I tried to use indirect
4620          * register read/write but this upset some 5701 variants.
4621          */
4622         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4623
4624         udelay(120);
4625
4626         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4627                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4628                         int i;
4629                         u32 cfg_val;
4630
4631                         /* Wait for link training to complete.  */
4632                         for (i = 0; i < 5000; i++)
4633                                 udelay(100);
4634
4635                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4636                         pci_write_config_dword(tp->pdev, 0xc4,
4637                                                cfg_val | (1 << 15));
4638                 }
4639                 /* Set PCIE max payload size and clear error status.  */
4640                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4641         }
4642
4643         /* Re-enable indirect register accesses. */
4644         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4645                                tp->misc_host_ctrl);
4646
4647         /* Set MAX PCI retry to zero. */
4648         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4649         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4650             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4651                 val |= PCISTATE_RETRY_SAME_DMA;
4652         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4653
4654         pci_restore_state(tp->pdev);
4655
4656         /* Make sure PCI-X relaxed ordering bit is clear. */
4657         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4658         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4659         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4660
4661         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4662                 u32 val;
4663
4664                 /* Chip reset on 5780 will reset MSI enable bit,
4665                  * so need to restore it.
4666                  */
4667                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4668                         u16 ctrl;
4669
4670                         pci_read_config_word(tp->pdev,
4671                                              tp->msi_cap + PCI_MSI_FLAGS,
4672                                              &ctrl);
4673                         pci_write_config_word(tp->pdev,
4674                                               tp->msi_cap + PCI_MSI_FLAGS,
4675                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4676                         val = tr32(MSGINT_MODE);
4677                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4678                 }
4679
4680                 val = tr32(MEMARB_MODE);
4681                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4682
4683         } else
4684                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4685
4686         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4687                 tg3_stop_fw(tp);
4688                 tw32(0x5000, 0x400);
4689         }
4690
4691         tw32(GRC_MODE, tp->grc_mode);
4692
4693         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4694                 u32 val = tr32(0xc4);
4695
4696                 tw32(0xc4, val | (1 << 15));
4697         }
4698
4699         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4700             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4701                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4702                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4703                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4704                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4705         }
4706
4707         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4708                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4709                 tw32_f(MAC_MODE, tp->mac_mode);
4710         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4711                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4712                 tw32_f(MAC_MODE, tp->mac_mode);
4713         } else
4714                 tw32_f(MAC_MODE, 0);
4715         udelay(40);
4716
4717         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4718                 /* Wait for firmware initialization to complete. */
4719                 for (i = 0; i < 100000; i++) {
4720                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4721                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4722                                 break;
4723                         udelay(10);
4724                 }
4725                 if (i >= 100000) {
4726                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4727                                "firmware will not restart magic=%08x\n",
4728                                tp->dev->name, val);
4729                         return -ENODEV;
4730                 }
4731         }
4732
4733         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4734             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4735                 u32 val = tr32(0x7c00);
4736
4737                 tw32(0x7c00, val | (1 << 25));
4738         }
4739
4740         /* Reprobe ASF enable state.  */
4741         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4742         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4743         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4744         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4745                 u32 nic_cfg;
4746
4747                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4748                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4749                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4750                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4751                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4752                 }
4753         }
4754
4755         return 0;
4756 }
4757
4758 /* tp->lock is held. */
4759 static void tg3_stop_fw(struct tg3 *tp)
4760 {
4761         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4762                 u32 val;
4763                 int i;
4764
4765                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4766                 val = tr32(GRC_RX_CPU_EVENT);
4767                 val |= (1 << 14);
4768                 tw32(GRC_RX_CPU_EVENT, val);
4769
4770                 /* Wait for RX cpu to ACK the event.  */
4771                 for (i = 0; i < 100; i++) {
4772                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4773                                 break;
4774                         udelay(1);
4775                 }
4776         }
4777 }
4778
4779 /* tp->lock is held. */
4780 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4781 {
4782         int err;
4783
4784         tg3_stop_fw(tp);
4785
4786         tg3_write_sig_pre_reset(tp, kind);
4787
4788         tg3_abort_hw(tp, silent);
4789         err = tg3_chip_reset(tp);
4790
4791         tg3_write_sig_legacy(tp, kind);
4792         tg3_write_sig_post_reset(tp, kind);
4793
4794         if (err)
4795                 return err;
4796
4797         return 0;
4798 }
4799
4800 #define TG3_FW_RELEASE_MAJOR    0x0
4801 #define TG3_FW_RELASE_MINOR     0x0
4802 #define TG3_FW_RELEASE_FIX      0x0
4803 #define TG3_FW_START_ADDR       0x08000000
4804 #define TG3_FW_TEXT_ADDR        0x08000000
4805 #define TG3_FW_TEXT_LEN         0x9c0
4806 #define TG3_FW_RODATA_ADDR      0x080009c0
4807 #define TG3_FW_RODATA_LEN       0x60
4808 #define TG3_FW_DATA_ADDR        0x08000a40
4809 #define TG3_FW_DATA_LEN         0x20
4810 #define TG3_FW_SBSS_ADDR        0x08000a60
4811 #define TG3_FW_SBSS_LEN         0xc
4812 #define TG3_FW_BSS_ADDR         0x08000a70
4813 #define TG3_FW_BSS_LEN          0x10
4814
4815 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4816         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4817         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4818         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4819         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4820         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4821         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4822         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4823         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4824         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4825         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4826         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4827         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4828         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4829         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4830         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4831         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4832         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4833         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4834         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4835         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4836         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4837         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4838         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4839         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4840         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4841         0, 0, 0, 0, 0, 0,
4842         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4843         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4844         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4845         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4846         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4847         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4848         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4849         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4850         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4851         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4852         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4853         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4854         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4855         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4856         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4857         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4858         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4859         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4860         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4861         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4862         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4863         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4864         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4865         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4866         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4867         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4868         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4869         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4870         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4871         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4872         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4873         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4874         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4875         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4876         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4877         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4878         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4879         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4880         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4881         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4882         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4883         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4884         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4885         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4886         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4887         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4888         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4889         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4890         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4891         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4892         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4893         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4894         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4895         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4896         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4897         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4898         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4899         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4900         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4901         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4902         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4903         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4904         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4905         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4906         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4907 };
4908
4909 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4910         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4911         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4912         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4913         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4914         0x00000000
4915 };
4916
4917 #if 0 /* All zeros, don't eat up space with it. */
4918 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4919         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4920         0x00000000, 0x00000000, 0x00000000, 0x00000000
4921 };
4922 #endif
4923
4924 #define RX_CPU_SCRATCH_BASE     0x30000
4925 #define RX_CPU_SCRATCH_SIZE     0x04000
4926 #define TX_CPU_SCRATCH_BASE     0x34000
4927 #define TX_CPU_SCRATCH_SIZE     0x04000
4928
4929 /* tp->lock is held. */
4930 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4931 {
4932         int i;
4933
4934         BUG_ON(offset == TX_CPU_BASE &&
4935             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
4936
4937         if (offset == RX_CPU_BASE) {
4938                 for (i = 0; i < 10000; i++) {
4939                         tw32(offset + CPU_STATE, 0xffffffff);
4940                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4941                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4942                                 break;
4943                 }
4944
4945                 tw32(offset + CPU_STATE, 0xffffffff);
4946                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4947                 udelay(10);
4948         } else {
4949                 for (i = 0; i < 10000; i++) {
4950                         tw32(offset + CPU_STATE, 0xffffffff);
4951                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4952                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4953                                 break;
4954                 }
4955         }
4956
4957         if (i >= 10000) {
4958                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4959                        "and %s CPU\n",
4960                        tp->dev->name,
4961                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4962                 return -ENODEV;
4963         }
4964
4965         /* Clear firmware's nvram arbitration. */
4966         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4967                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4968         return 0;
4969 }
4970
4971 struct fw_info {
4972         unsigned int text_base;
4973         unsigned int text_len;
4974         u32 *text_data;
4975         unsigned int rodata_base;
4976         unsigned int rodata_len;
4977         u32 *rodata_data;
4978         unsigned int data_base;
4979         unsigned int data_len;
4980         u32 *data_data;
4981 };
4982
4983 /* tp->lock is held. */
4984 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4985                                  int cpu_scratch_size, struct fw_info *info)
4986 {
4987         int err, lock_err, i;
4988         void (*write_op)(struct tg3 *, u32, u32);
4989
4990         if (cpu_base == TX_CPU_BASE &&
4991             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4992                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4993                        "TX cpu firmware on %s which is 5705.\n",
4994                        tp->dev->name);
4995                 return -EINVAL;
4996         }
4997
4998         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4999                 write_op = tg3_write_mem;
5000         else
5001                 write_op = tg3_write_indirect_reg32;
5002
5003         /* It is possible that bootcode is still loading at this point.
5004          * Get the nvram lock first before halting the cpu.
5005          */
5006         lock_err = tg3_nvram_lock(tp);
5007         err = tg3_halt_cpu(tp, cpu_base);
5008         if (!lock_err)
5009                 tg3_nvram_unlock(tp);
5010         if (err)
5011                 goto out;
5012
5013         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5014                 write_op(tp, cpu_scratch_base + i, 0);
5015         tw32(cpu_base + CPU_STATE, 0xffffffff);
5016         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5017         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5018                 write_op(tp, (cpu_scratch_base +
5019                               (info->text_base & 0xffff) +
5020                               (i * sizeof(u32))),
5021                          (info->text_data ?
5022                           info->text_data[i] : 0));
5023         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5024                 write_op(tp, (cpu_scratch_base +
5025                               (info->rodata_base & 0xffff) +
5026                               (i * sizeof(u32))),
5027                          (info->rodata_data ?
5028                           info->rodata_data[i] : 0));
5029         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5030                 write_op(tp, (cpu_scratch_base +
5031                               (info->data_base & 0xffff) +
5032                               (i * sizeof(u32))),
5033                          (info->data_data ?
5034                           info->data_data[i] : 0));
5035
5036         err = 0;
5037
5038 out:
5039         return err;
5040 }
5041
5042 /* tp->lock is held. */
5043 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5044 {
5045         struct fw_info info;
5046         int err, i;
5047
5048         info.text_base = TG3_FW_TEXT_ADDR;
5049         info.text_len = TG3_FW_TEXT_LEN;
5050         info.text_data = &tg3FwText[0];
5051         info.rodata_base = TG3_FW_RODATA_ADDR;
5052         info.rodata_len = TG3_FW_RODATA_LEN;
5053         info.rodata_data = &tg3FwRodata[0];
5054         info.data_base = TG3_FW_DATA_ADDR;
5055         info.data_len = TG3_FW_DATA_LEN;
5056         info.data_data = NULL;
5057
5058         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5059                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5060                                     &info);
5061         if (err)
5062                 return err;
5063
5064         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5065                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5066                                     &info);
5067         if (err)
5068                 return err;
5069
5070         /* Now startup only the RX cpu. */
5071         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5072         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5073
5074         for (i = 0; i < 5; i++) {
5075                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5076                         break;
5077                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5078                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5079                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5080                 udelay(1000);
5081         }
5082         if (i >= 5) {
5083                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5084                        "to set RX CPU PC, is %08x should be %08x\n",
5085                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5086                        TG3_FW_TEXT_ADDR);
5087                 return -ENODEV;
5088         }
5089         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5090         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5091
5092         return 0;
5093 }
5094
5095 #if TG3_TSO_SUPPORT != 0
5096
5097 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5098 #define TG3_TSO_FW_RELASE_MINOR         0x6
5099 #define TG3_TSO_FW_RELEASE_FIX          0x0
5100 #define TG3_TSO_FW_START_ADDR           0x08000000
5101 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5102 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5103 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5104 #define TG3_TSO_FW_RODATA_LEN           0x60
5105 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5106 #define TG3_TSO_FW_DATA_LEN             0x30
5107 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5108 #define TG3_TSO_FW_SBSS_LEN             0x2c
5109 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5110 #define TG3_TSO_FW_BSS_LEN              0x894
5111
5112 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5113         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5114         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5115         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5116         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5117         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5118         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5119         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5120         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5121         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5122         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5123         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5124         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5125         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5126         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5127         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5128         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5129         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5130         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5131         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5132         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5133         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5134         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5135         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5136         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5137         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5138         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5139         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5140         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5141         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5142         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5143         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5144         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5145         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5146         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5147         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5148         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5149         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5150         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5151         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5152         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5153         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5154         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5155         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5156         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5157         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5158         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5159         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5160         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5161         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5162         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5163         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5164         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5165         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5166         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5167         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5168         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5169         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5170         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5171         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5172         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5173         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5174         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5175         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5176         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5177         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5178         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5179         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5180         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5181         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5182         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5183         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5184         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5185         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5186         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5187         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5188         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5189         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5190         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5191         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5192         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5193         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5194         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5195         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5196         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5197         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5198         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5199         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5200         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5201         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5202         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5203         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5204         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5205         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5206         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5207         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5208         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5209         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5210         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5211         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5212         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5213         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5214         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5215         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5216         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5217         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5218         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5219         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5220         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5221         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5222         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5223         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5224         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5225         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5226         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5227         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5228         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5229         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5230         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5231         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5232         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5233         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5234         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5235         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5236         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5237         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5238         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5239         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5240         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5241         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5242         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5243         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5244         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5245         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5246         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5247         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5248         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5249         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5250         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5251         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5252         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5253         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5254         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5255         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5256         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5257         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5258         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5259         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5260         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5261         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5262         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5263         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5264         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5265         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5266         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5267         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5268         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5269         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5270         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5271         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5272         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5273         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5274         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5275         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5276         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5277         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5278         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5279         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5280         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5281         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5282         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5283         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5284         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5285         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5286         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5287         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5288         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5289         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5290         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5291         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5292         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5293         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5294         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5295         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5296         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5297         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5298         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5299         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5300         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5301         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5302         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5303         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5304         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5305         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5306         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5307         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5308         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5309         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5310         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5311         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5312         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5313         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5314         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5315         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5316         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5317         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5318         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5319         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5320         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5321         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5322         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5323         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5324         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5325         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5326         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5327         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5328         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5329         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5330         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5331         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5332         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5333         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5334         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5335         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5336         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5337         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5338         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5339         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5340         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5341         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5342         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5343         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5344         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5345         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5346         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5347         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5348         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5349         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5350         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5351         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5352         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5353         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5354         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5355         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5356         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5357         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5358         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5359         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5360         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5361         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5362         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5363         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5364         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5365         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5366         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5367         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5368         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5369         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5370         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5371         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5372         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5373         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5374         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5375         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5376         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5377         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5378         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5379         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5380         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5381         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5382         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5383         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5384         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5385         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5386         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5387         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5388         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5389         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5390         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5391         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5392         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5393         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5394         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5395         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5396         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5397 };
5398
5399 static u32 tg3TsoFwRodata[] = {
5400         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5401         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5402         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5403         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5404         0x00000000,
5405 };
5406
5407 static u32 tg3TsoFwData[] = {
5408         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5409         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5410         0x00000000,
5411 };
5412
5413 /* 5705 needs a special version of the TSO firmware.  */
5414 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5415 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5416 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5417 #define TG3_TSO5_FW_START_ADDR          0x00010000
5418 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5419 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5420 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5421 #define TG3_TSO5_FW_RODATA_LEN          0x50
5422 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5423 #define TG3_TSO5_FW_DATA_LEN            0x20
5424 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5425 #define TG3_TSO5_FW_SBSS_LEN            0x28
5426 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5427 #define TG3_TSO5_FW_BSS_LEN             0x88
5428
5429 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5430         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5431         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5432         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5433         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5434         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5435         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5436         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5437         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5438         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5439         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5440         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5441         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5442         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5443         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5444         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5445         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5446         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5447         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5448         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5449         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5450         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5451         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5452         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5453         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5454         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5455         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5456         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5457         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5458         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5459         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5460         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5461         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5462         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5463         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5464         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5465         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5466         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5467         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5468         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5469         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5470         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5471         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5472         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5473         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5474         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5475         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5476         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5477         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5478         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5479         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5480         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5481         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5482         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5483         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5484         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5485         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5486         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5487         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5488         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5489         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5490         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5491         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5492         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5493         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5494         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5495         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5496         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5497         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5498         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5499         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5500         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5501         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5502         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5503         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5504         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5505         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5506         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5507         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5508         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5509         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5510         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5511         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5512         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5513         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5514         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5515         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5516         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5517         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5518         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5519         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5520         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5521         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5522         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5523         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5524         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5525         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5526         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5527         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5528         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5529         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5530         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5531         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5532         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5533         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5534         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5535         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5536         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5537         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5538         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5539         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5540         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5541         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5542         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5543         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5544         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5545         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5546         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5547         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5548         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5549         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5550         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5551         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5552         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5553         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5554         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5555         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5556         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5557         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5558         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5559         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5560         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5561         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5562         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5563         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5564         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5565         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5566         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5567         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5568         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5569         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5570         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5571         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5572         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5573         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5574         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5575         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5576         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5577         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5578         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5579         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5580         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5581         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5582         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5583         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5584         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5585         0x00000000, 0x00000000, 0x00000000,
5586 };
5587
5588 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5589         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5590         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5591         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5592         0x00000000, 0x00000000, 0x00000000,
5593 };
5594
5595 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5596         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5597         0x00000000, 0x00000000, 0x00000000,
5598 };
5599
5600 /* tp->lock is held. */
5601 static int tg3_load_tso_firmware(struct tg3 *tp)
5602 {
5603         struct fw_info info;
5604         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5605         int err, i;
5606
5607         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5608                 return 0;
5609
5610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5611                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5612                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5613                 info.text_data = &tg3Tso5FwText[0];
5614                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5615                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5616                 info.rodata_data = &tg3Tso5FwRodata[0];
5617                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5618                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5619                 info.data_data = &tg3Tso5FwData[0];
5620                 cpu_base = RX_CPU_BASE;
5621                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5622                 cpu_scratch_size = (info.text_len +
5623                                     info.rodata_len +
5624                                     info.data_len +
5625                                     TG3_TSO5_FW_SBSS_LEN +
5626                                     TG3_TSO5_FW_BSS_LEN);
5627         } else {
5628                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5629                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5630                 info.text_data = &tg3TsoFwText[0];
5631                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5632                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5633                 info.rodata_data = &tg3TsoFwRodata[0];
5634                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5635                 info.data_len = TG3_TSO_FW_DATA_LEN;
5636                 info.data_data = &tg3TsoFwData[0];
5637                 cpu_base = TX_CPU_BASE;
5638                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5639                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5640         }
5641
5642         err = tg3_load_firmware_cpu(tp, cpu_base,
5643                                     cpu_scratch_base, cpu_scratch_size,
5644                                     &info);
5645         if (err)
5646                 return err;
5647
5648         /* Now startup the cpu. */
5649         tw32(cpu_base + CPU_STATE, 0xffffffff);
5650         tw32_f(cpu_base + CPU_PC,    info.text_base);
5651
5652         for (i = 0; i < 5; i++) {
5653                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5654                         break;
5655                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5656                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5657                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5658                 udelay(1000);
5659         }
5660         if (i >= 5) {
5661                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5662                        "to set CPU PC, is %08x should be %08x\n",
5663                        tp->dev->name, tr32(cpu_base + CPU_PC),
5664                        info.text_base);
5665                 return -ENODEV;
5666         }
5667         tw32(cpu_base + CPU_STATE, 0xffffffff);
5668         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5669         return 0;
5670 }
5671
5672 #endif /* TG3_TSO_SUPPORT != 0 */
5673
5674 /* tp->lock is held. */
5675 static void __tg3_set_mac_addr(struct tg3 *tp)
5676 {
5677         u32 addr_high, addr_low;
5678         int i;
5679
5680         addr_high = ((tp->dev->dev_addr[0] << 8) |
5681                      tp->dev->dev_addr[1]);
5682         addr_low = ((tp->dev->dev_addr[2] << 24) |
5683                     (tp->dev->dev_addr[3] << 16) |
5684                     (tp->dev->dev_addr[4] <<  8) |
5685                     (tp->dev->dev_addr[5] <<  0));
5686         for (i = 0; i < 4; i++) {
5687                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5688                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5689         }
5690
5691         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5692             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5693                 for (i = 0; i < 12; i++) {
5694                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5695                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5696                 }
5697         }
5698
5699         addr_high = (tp->dev->dev_addr[0] +
5700                      tp->dev->dev_addr[1] +
5701                      tp->dev->dev_addr[2] +
5702                      tp->dev->dev_addr[3] +
5703                      tp->dev->dev_addr[4] +
5704                      tp->dev->dev_addr[5]) &
5705                 TX_BACKOFF_SEED_MASK;
5706         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5707 }
5708
5709 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5710 {
5711         struct tg3 *tp = netdev_priv(dev);
5712         struct sockaddr *addr = p;
5713
5714         if (!is_valid_ether_addr(addr->sa_data))
5715                 return -EINVAL;
5716
5717         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5718
5719         if (!netif_running(dev))
5720                 return 0;
5721
5722         spin_lock_bh(&tp->lock);
5723         __tg3_set_mac_addr(tp);
5724         spin_unlock_bh(&tp->lock);
5725
5726         return 0;
5727 }
5728
5729 /* tp->lock is held. */
5730 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5731                            dma_addr_t mapping, u32 maxlen_flags,
5732                            u32 nic_addr)
5733 {
5734         tg3_write_mem(tp,
5735                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5736                       ((u64) mapping >> 32));
5737         tg3_write_mem(tp,
5738                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5739                       ((u64) mapping & 0xffffffff));
5740         tg3_write_mem(tp,
5741                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5742                        maxlen_flags);
5743
5744         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5745                 tg3_write_mem(tp,
5746                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5747                               nic_addr);
5748 }
5749
5750 static void __tg3_set_rx_mode(struct net_device *);
5751 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5752 {
5753         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5754         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5755         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5756         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5757         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5758                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5759                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5760         }
5761         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5762         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5763         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5764                 u32 val = ec->stats_block_coalesce_usecs;
5765
5766                 if (!netif_carrier_ok(tp->dev))
5767                         val = 0;
5768
5769                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5770         }
5771 }
5772
5773 /* tp->lock is held. */
5774 static int tg3_reset_hw(struct tg3 *tp)
5775 {
5776         u32 val, rdmac_mode;
5777         int i, err, limit;
5778
5779         tg3_disable_ints(tp);
5780
5781         tg3_stop_fw(tp);
5782
5783         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5784
5785         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5786                 tg3_abort_hw(tp, 1);
5787         }
5788
5789         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5790                 tg3_phy_reset(tp);
5791
5792         err = tg3_chip_reset(tp);
5793         if (err)
5794                 return err;
5795
5796         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5797
5798         /* This works around an issue with Athlon chipsets on
5799          * B3 tigon3 silicon.  This bit has no effect on any
5800          * other revision.  But do not set this on PCI Express
5801          * chips.
5802          */
5803         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5804                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5805         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5806
5807         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5808             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5809                 val = tr32(TG3PCI_PCISTATE);
5810                 val |= PCISTATE_RETRY_SAME_DMA;
5811                 tw32(TG3PCI_PCISTATE, val);
5812         }
5813
5814         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5815                 /* Enable some hw fixes.  */
5816                 val = tr32(TG3PCI_MSI_DATA);
5817                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5818                 tw32(TG3PCI_MSI_DATA, val);
5819         }
5820
5821         /* Descriptor ring init may make accesses to the
5822          * NIC SRAM area to setup the TX descriptors, so we
5823          * can only do this after the hardware has been
5824          * successfully reset.
5825          */
5826         tg3_init_rings(tp);
5827
5828         /* This value is determined during the probe time DMA
5829          * engine test, tg3_test_dma.
5830          */
5831         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5832
5833         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5834                           GRC_MODE_4X_NIC_SEND_RINGS |
5835                           GRC_MODE_NO_TX_PHDR_CSUM |
5836                           GRC_MODE_NO_RX_PHDR_CSUM);
5837         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5838
5839         /* Pseudo-header checksum is done by hardware logic and not
5840          * the offload processers, so make the chip do the pseudo-
5841          * header checksums on receive.  For transmit it is more
5842          * convenient to do the pseudo-header checksum in software
5843          * as Linux does that on transmit for us in all cases.
5844          */
5845         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5846
5847         tw32(GRC_MODE,
5848              tp->grc_mode |
5849              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5850
5851         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
5852         val = tr32(GRC_MISC_CFG);
5853         val &= ~0xff;
5854         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5855         tw32(GRC_MISC_CFG, val);
5856
5857         /* Initialize MBUF/DESC pool. */
5858         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5859                 /* Do nothing.  */
5860         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5861                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5862                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5863                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5864                 else
5865                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5866                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5867                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5868         }
5869 #if TG3_TSO_SUPPORT != 0
5870         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5871                 int fw_len;
5872
5873                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5874                           TG3_TSO5_FW_RODATA_LEN +
5875                           TG3_TSO5_FW_DATA_LEN +
5876                           TG3_TSO5_FW_SBSS_LEN +
5877                           TG3_TSO5_FW_BSS_LEN);
5878                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5879                 tw32(BUFMGR_MB_POOL_ADDR,
5880                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5881                 tw32(BUFMGR_MB_POOL_SIZE,
5882                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5883         }
5884 #endif
5885
5886         if (tp->dev->mtu <= ETH_DATA_LEN) {
5887                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5888                      tp->bufmgr_config.mbuf_read_dma_low_water);
5889                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5890                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5891                 tw32(BUFMGR_MB_HIGH_WATER,
5892                      tp->bufmgr_config.mbuf_high_water);
5893         } else {
5894                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5895                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5896                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5897                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5898                 tw32(BUFMGR_MB_HIGH_WATER,
5899                      tp->bufmgr_config.mbuf_high_water_jumbo);
5900         }
5901         tw32(BUFMGR_DMA_LOW_WATER,
5902              tp->bufmgr_config.dma_low_water);
5903         tw32(BUFMGR_DMA_HIGH_WATER,
5904              tp->bufmgr_config.dma_high_water);
5905
5906         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5907         for (i = 0; i < 2000; i++) {
5908                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5909                         break;
5910                 udelay(10);
5911         }
5912         if (i >= 2000) {
5913                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5914                        tp->dev->name);
5915                 return -ENODEV;
5916         }
5917
5918         /* Setup replenish threshold. */
5919         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5920
5921         /* Initialize TG3_BDINFO's at:
5922          *  RCVDBDI_STD_BD:     standard eth size rx ring
5923          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5924          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5925          *
5926          * like so:
5927          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5928          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5929          *                              ring attribute flags
5930          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5931          *
5932          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5933          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5934          *
5935          * The size of each ring is fixed in the firmware, but the location is
5936          * configurable.
5937          */
5938         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5939              ((u64) tp->rx_std_mapping >> 32));
5940         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5941              ((u64) tp->rx_std_mapping & 0xffffffff));
5942         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5943              NIC_SRAM_RX_BUFFER_DESC);
5944
5945         /* Don't even try to program the JUMBO/MINI buffer descriptor
5946          * configs on 5705.
5947          */
5948         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5949                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5950                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5951         } else {
5952                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5953                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5954
5955                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5956                      BDINFO_FLAGS_DISABLED);
5957
5958                 /* Setup replenish threshold. */
5959                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5960
5961                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5962                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5963                              ((u64) tp->rx_jumbo_mapping >> 32));
5964                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5965                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5966                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5967                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5968                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5969                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5970                 } else {
5971                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5972                              BDINFO_FLAGS_DISABLED);
5973                 }
5974
5975         }
5976
5977         /* There is only one send ring on 5705/5750, no need to explicitly
5978          * disable the others.
5979          */
5980         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5981                 /* Clear out send RCB ring in SRAM. */
5982                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5983                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5984                                       BDINFO_FLAGS_DISABLED);
5985         }
5986
5987         tp->tx_prod = 0;
5988         tp->tx_cons = 0;
5989         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5990         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5991
5992         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5993                        tp->tx_desc_mapping,
5994                        (TG3_TX_RING_SIZE <<
5995                         BDINFO_FLAGS_MAXLEN_SHIFT),
5996                        NIC_SRAM_TX_BUFFER_DESC);
5997
5998         /* There is only one receive return ring on 5705/5750, no need
5999          * to explicitly disable the others.
6000          */
6001         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6002                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6003                      i += TG3_BDINFO_SIZE) {
6004                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6005                                       BDINFO_FLAGS_DISABLED);
6006                 }
6007         }
6008
6009         tp->rx_rcb_ptr = 0;
6010         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6011
6012         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6013                        tp->rx_rcb_mapping,
6014                        (TG3_RX_RCB_RING_SIZE(tp) <<
6015                         BDINFO_FLAGS_MAXLEN_SHIFT),
6016                        0);
6017
6018         tp->rx_std_ptr = tp->rx_pending;
6019         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6020                      tp->rx_std_ptr);
6021
6022         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6023                                                 tp->rx_jumbo_pending : 0;
6024         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6025                      tp->rx_jumbo_ptr);
6026
6027         /* Initialize MAC address and backoff seed. */
6028         __tg3_set_mac_addr(tp);
6029
6030         /* MTU + ethernet header + FCS + optional VLAN tag */
6031         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6032
6033         /* The slot time is changed by tg3_setup_phy if we
6034          * run at gigabit with half duplex.
6035          */
6036         tw32(MAC_TX_LENGTHS,
6037              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6038              (6 << TX_LENGTHS_IPG_SHIFT) |
6039              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6040
6041         /* Receive rules. */
6042         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6043         tw32(RCVLPC_CONFIG, 0x0181);
6044
6045         /* Calculate RDMAC_MODE setting early, we need it to determine
6046          * the RCVLPC_STATE_ENABLE mask.
6047          */
6048         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6049                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6050                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6051                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6052                       RDMAC_MODE_LNGREAD_ENAB);
6053         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6054                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6055
6056         /* If statement applies to 5705 and 5750 PCI devices only */
6057         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6058              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6059             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6060                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6061                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6062                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6063                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6064                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6065                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6066                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6067                 }
6068         }
6069
6070         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6071                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6072
6073 #if TG3_TSO_SUPPORT != 0
6074         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6075                 rdmac_mode |= (1 << 27);
6076 #endif
6077
6078         /* Receive/send statistics. */
6079         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6080             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6081                 val = tr32(RCVLPC_STATS_ENABLE);
6082                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6083                 tw32(RCVLPC_STATS_ENABLE, val);
6084         } else {
6085                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6086         }
6087         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6088         tw32(SNDDATAI_STATSENAB, 0xffffff);
6089         tw32(SNDDATAI_STATSCTRL,
6090              (SNDDATAI_SCTRL_ENABLE |
6091               SNDDATAI_SCTRL_FASTUPD));
6092
6093         /* Setup host coalescing engine. */
6094         tw32(HOSTCC_MODE, 0);
6095         for (i = 0; i < 2000; i++) {
6096                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6097                         break;
6098                 udelay(10);
6099         }
6100
6101         __tg3_set_coalesce(tp, &tp->coal);
6102
6103         /* set status block DMA address */
6104         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6105              ((u64) tp->status_mapping >> 32));
6106         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6107              ((u64) tp->status_mapping & 0xffffffff));
6108
6109         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6110                 /* Status/statistics block address.  See tg3_timer,
6111                  * the tg3_periodic_fetch_stats call there, and
6112                  * tg3_get_stats to see how this works for 5705/5750 chips.
6113                  */
6114                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6115                      ((u64) tp->stats_mapping >> 32));
6116                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6117                      ((u64) tp->stats_mapping & 0xffffffff));
6118                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6119                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6120         }
6121
6122         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6123
6124         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6125         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6126         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6127                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6128
6129         /* Clear statistics/status block in chip, and status block in ram. */
6130         for (i = NIC_SRAM_STATS_BLK;
6131              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6132              i += sizeof(u32)) {
6133                 tg3_write_mem(tp, i, 0);
6134                 udelay(40);
6135         }
6136         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6137
6138         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6139                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6140                 /* reset to prevent losing 1st rx packet intermittently */
6141                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6142                 udelay(10);
6143         }
6144
6145         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6146                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6147         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6148         udelay(40);
6149
6150         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6151          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6152          * register to preserve the GPIO settings for LOMs. The GPIOs,
6153          * whether used as inputs or outputs, are set by boot code after
6154          * reset.
6155          */
6156         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6157                 u32 gpio_mask;
6158
6159                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6160                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6161
6162                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6163                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6164                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6165
6166                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6167                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6168
6169                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6170
6171                 /* GPIO1 must be driven high for eeprom write protect */
6172                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6173                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6174         }
6175         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6176         udelay(100);
6177
6178         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6179         tp->last_tag = 0;
6180
6181         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6182                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6183                 udelay(40);
6184         }
6185
6186         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6187                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6188                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6189                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6190                WDMAC_MODE_LNGREAD_ENAB);
6191
6192         /* If statement applies to 5705 and 5750 PCI devices only */
6193         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6194              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6195             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6196                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6197                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6198                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6199                         /* nothing */
6200                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6201                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6202                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6203                         val |= WDMAC_MODE_RX_ACCEL;
6204                 }
6205         }
6206
6207         /* Enable host coalescing bug fix */
6208         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6209             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6210                 val |= (1 << 29);
6211
6212         tw32_f(WDMAC_MODE, val);
6213         udelay(40);
6214
6215         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6216                 val = tr32(TG3PCI_X_CAPS);
6217                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6218                         val &= ~PCIX_CAPS_BURST_MASK;
6219                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6220                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6221                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6222                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6223                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6224                                 val |= (tp->split_mode_max_reqs <<
6225                                         PCIX_CAPS_SPLIT_SHIFT);
6226                 }
6227                 tw32(TG3PCI_X_CAPS, val);
6228         }
6229
6230         tw32_f(RDMAC_MODE, rdmac_mode);
6231         udelay(40);
6232
6233         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6234         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6235                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6236         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6237         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6238         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6239         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6240         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6241 #if TG3_TSO_SUPPORT != 0
6242         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6243                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6244 #endif
6245         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6246         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6247
6248         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6249                 err = tg3_load_5701_a0_firmware_fix(tp);
6250                 if (err)
6251                         return err;
6252         }
6253
6254 #if TG3_TSO_SUPPORT != 0
6255         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6256                 err = tg3_load_tso_firmware(tp);
6257                 if (err)
6258                         return err;
6259         }
6260 #endif
6261
6262         tp->tx_mode = TX_MODE_ENABLE;
6263         tw32_f(MAC_TX_MODE, tp->tx_mode);
6264         udelay(100);
6265
6266         tp->rx_mode = RX_MODE_ENABLE;
6267         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6268                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6269
6270         tw32_f(MAC_RX_MODE, tp->rx_mode);
6271         udelay(10);
6272
6273         if (tp->link_config.phy_is_low_power) {
6274                 tp->link_config.phy_is_low_power = 0;
6275                 tp->link_config.speed = tp->link_config.orig_speed;
6276                 tp->link_config.duplex = tp->link_config.orig_duplex;
6277                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6278         }
6279
6280         tp->mi_mode = MAC_MI_MODE_BASE;
6281         tw32_f(MAC_MI_MODE, tp->mi_mode);
6282         udelay(80);
6283
6284         tw32(MAC_LED_CTRL, tp->led_ctrl);
6285
6286         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6287         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6288                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6289                 udelay(10);
6290         }
6291         tw32_f(MAC_RX_MODE, tp->rx_mode);
6292         udelay(10);
6293
6294         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6295                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6296                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6297                         /* Set drive transmission level to 1.2V  */
6298                         /* only if the signal pre-emphasis bit is not set  */
6299                         val = tr32(MAC_SERDES_CFG);
6300                         val &= 0xfffff000;
6301                         val |= 0x880;
6302                         tw32(MAC_SERDES_CFG, val);
6303                 }
6304                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6305                         tw32(MAC_SERDES_CFG, 0x616000);
6306         }
6307
6308         /* Prevent chip from dropping frames when flow control
6309          * is enabled.
6310          */
6311         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6312
6313         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6314             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6315                 /* Use hardware link auto-negotiation */
6316                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6317         }
6318
6319         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6320             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6321                 u32 tmp;
6322
6323                 tmp = tr32(SERDES_RX_CTRL);
6324                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6325                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6326                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6327                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6328         }
6329
6330         err = tg3_setup_phy(tp, 1);
6331         if (err)
6332                 return err;
6333
6334         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6335                 u32 tmp;
6336
6337                 /* Clear CRC stats. */
6338                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6339                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6340                         tg3_readphy(tp, 0x14, &tmp);
6341                 }
6342         }
6343
6344         __tg3_set_rx_mode(tp->dev);
6345
6346         /* Initialize receive rules. */
6347         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6348         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6349         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6350         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6351
6352         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6353             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6354                 limit = 8;
6355         else
6356                 limit = 16;
6357         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6358                 limit -= 4;
6359         switch (limit) {
6360         case 16:
6361                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6362         case 15:
6363                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6364         case 14:
6365                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6366         case 13:
6367                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6368         case 12:
6369                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6370         case 11:
6371                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6372         case 10:
6373                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6374         case 9:
6375                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6376         case 8:
6377                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6378         case 7:
6379                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6380         case 6:
6381                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6382         case 5:
6383                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6384         case 4:
6385                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6386         case 3:
6387                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6388         case 2:
6389         case 1:
6390
6391         default:
6392                 break;
6393         };
6394
6395         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6396
6397         return 0;
6398 }
6399
6400 /* Called at device open time to get the chip ready for
6401  * packet processing.  Invoked with tp->lock held.
6402  */
6403 static int tg3_init_hw(struct tg3 *tp)
6404 {
6405         int err;
6406
6407         /* Force the chip into D0. */
6408         err = tg3_set_power_state(tp, PCI_D0);
6409         if (err)
6410                 goto out;
6411
6412         tg3_switch_clocks(tp);
6413
6414         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6415
6416         err = tg3_reset_hw(tp);
6417
6418 out:
6419         return err;
6420 }
6421
6422 #define TG3_STAT_ADD32(PSTAT, REG) \
6423 do {    u32 __val = tr32(REG); \
6424         (PSTAT)->low += __val; \
6425         if ((PSTAT)->low < __val) \
6426                 (PSTAT)->high += 1; \
6427 } while (0)
6428
6429 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6430 {
6431         struct tg3_hw_stats *sp = tp->hw_stats;
6432
6433         if (!netif_carrier_ok(tp->dev))
6434                 return;
6435
6436         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6437         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6438         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6439         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6440         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6441         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6442         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6443         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6444         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6445         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6446         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6447         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6448         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6449
6450         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6451         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6452         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6453         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6454         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6455         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6456         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6457         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6458         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6459         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6460         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6461         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6462         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6463         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6464 }
6465
6466 static void tg3_timer(unsigned long __opaque)
6467 {
6468         struct tg3 *tp = (struct tg3 *) __opaque;
6469
6470         if (tp->irq_sync)
6471                 goto restart_timer;
6472
6473         spin_lock(&tp->lock);
6474
6475         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6476                 /* All of this garbage is because when using non-tagged
6477                  * IRQ status the mailbox/status_block protocol the chip
6478                  * uses with the cpu is race prone.
6479                  */
6480                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6481                         tw32(GRC_LOCAL_CTRL,
6482                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6483                 } else {
6484                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6485                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6486                 }
6487
6488                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6489                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6490                         spin_unlock(&tp->lock);
6491                         schedule_work(&tp->reset_task);
6492                         return;
6493                 }
6494         }
6495
6496         /* This part only runs once per second. */
6497         if (!--tp->timer_counter) {
6498                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6499                         tg3_periodic_fetch_stats(tp);
6500
6501                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6502                         u32 mac_stat;
6503                         int phy_event;
6504
6505                         mac_stat = tr32(MAC_STATUS);
6506
6507                         phy_event = 0;
6508                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6509                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6510                                         phy_event = 1;
6511                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6512                                 phy_event = 1;
6513
6514                         if (phy_event)
6515                                 tg3_setup_phy(tp, 0);
6516                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6517                         u32 mac_stat = tr32(MAC_STATUS);
6518                         int need_setup = 0;
6519
6520                         if (netif_carrier_ok(tp->dev) &&
6521                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6522                                 need_setup = 1;
6523                         }
6524                         if (! netif_carrier_ok(tp->dev) &&
6525                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6526                                          MAC_STATUS_SIGNAL_DET))) {
6527                                 need_setup = 1;
6528                         }
6529                         if (need_setup) {
6530                                 tw32_f(MAC_MODE,
6531                                      (tp->mac_mode &
6532                                       ~MAC_MODE_PORT_MODE_MASK));
6533                                 udelay(40);
6534                                 tw32_f(MAC_MODE, tp->mac_mode);
6535                                 udelay(40);
6536                                 tg3_setup_phy(tp, 0);
6537                         }
6538                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6539                         tg3_serdes_parallel_detect(tp);
6540
6541                 tp->timer_counter = tp->timer_multiplier;
6542         }
6543
6544         /* Heartbeat is only sent once every 2 seconds.  */
6545         if (!--tp->asf_counter) {
6546                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6547                         u32 val;
6548
6549                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6550                                       FWCMD_NICDRV_ALIVE2);
6551                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6552                         /* 5 seconds timeout */
6553                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6554                         val = tr32(GRC_RX_CPU_EVENT);
6555                         val |= (1 << 14);
6556                         tw32(GRC_RX_CPU_EVENT, val);
6557                 }
6558                 tp->asf_counter = tp->asf_multiplier;
6559         }
6560
6561         spin_unlock(&tp->lock);
6562
6563 restart_timer:
6564         tp->timer.expires = jiffies + tp->timer_offset;
6565         add_timer(&tp->timer);
6566 }
6567
6568 static int tg3_request_irq(struct tg3 *tp)
6569 {
6570         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6571         unsigned long flags;
6572         struct net_device *dev = tp->dev;
6573
6574         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6575                 fn = tg3_msi;
6576                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6577                         fn = tg3_msi_1shot;
6578                 flags = SA_SAMPLE_RANDOM;
6579         } else {
6580                 fn = tg3_interrupt;
6581                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6582                         fn = tg3_interrupt_tagged;
6583                 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6584         }
6585         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6586 }
6587
6588 static int tg3_test_interrupt(struct tg3 *tp)
6589 {
6590         struct net_device *dev = tp->dev;
6591         int err, i;
6592         u32 int_mbox = 0;
6593
6594         if (!netif_running(dev))
6595                 return -ENODEV;
6596
6597         tg3_disable_ints(tp);
6598
6599         free_irq(tp->pdev->irq, dev);
6600
6601         err = request_irq(tp->pdev->irq, tg3_test_isr,
6602                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6603         if (err)
6604                 return err;
6605
6606         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6607         tg3_enable_ints(tp);
6608
6609         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6610                HOSTCC_MODE_NOW);
6611
6612         for (i = 0; i < 5; i++) {
6613                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6614                                         TG3_64BIT_REG_LOW);
6615                 if (int_mbox != 0)
6616                         break;
6617                 msleep(10);
6618         }
6619
6620         tg3_disable_ints(tp);
6621
6622         free_irq(tp->pdev->irq, dev);
6623         
6624         err = tg3_request_irq(tp);
6625
6626         if (err)
6627                 return err;
6628
6629         if (int_mbox != 0)
6630                 return 0;
6631
6632         return -EIO;
6633 }
6634
6635 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6636  * successfully restored
6637  */
6638 static int tg3_test_msi(struct tg3 *tp)
6639 {
6640         struct net_device *dev = tp->dev;
6641         int err;
6642         u16 pci_cmd;
6643
6644         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6645                 return 0;
6646
6647         /* Turn off SERR reporting in case MSI terminates with Master
6648          * Abort.
6649          */
6650         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6651         pci_write_config_word(tp->pdev, PCI_COMMAND,
6652                               pci_cmd & ~PCI_COMMAND_SERR);
6653
6654         err = tg3_test_interrupt(tp);
6655
6656         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6657
6658         if (!err)
6659                 return 0;
6660
6661         /* other failures */
6662         if (err != -EIO)
6663                 return err;
6664
6665         /* MSI test failed, go back to INTx mode */
6666         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6667                "switching to INTx mode. Please report this failure to "
6668                "the PCI maintainer and include system chipset information.\n",
6669                        tp->dev->name);
6670
6671         free_irq(tp->pdev->irq, dev);
6672         pci_disable_msi(tp->pdev);
6673
6674         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6675
6676         err = tg3_request_irq(tp);
6677         if (err)
6678                 return err;
6679
6680         /* Need to reset the chip because the MSI cycle may have terminated
6681          * with Master Abort.
6682          */
6683         tg3_full_lock(tp, 1);
6684
6685         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6686         err = tg3_init_hw(tp);
6687
6688         tg3_full_unlock(tp);
6689
6690         if (err)
6691                 free_irq(tp->pdev->irq, dev);
6692
6693         return err;
6694 }
6695
6696 static int tg3_open(struct net_device *dev)
6697 {
6698         struct tg3 *tp = netdev_priv(dev);
6699         int err;
6700
6701         tg3_full_lock(tp, 0);
6702
6703         err = tg3_set_power_state(tp, PCI_D0);
6704         if (err)
6705                 return err;
6706
6707         tg3_disable_ints(tp);
6708         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6709
6710         tg3_full_unlock(tp);
6711
6712         /* The placement of this call is tied
6713          * to the setup and use of Host TX descriptors.
6714          */
6715         err = tg3_alloc_consistent(tp);
6716         if (err)
6717                 return err;
6718
6719         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6720             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6721             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6722             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6723               (tp->pdev_peer == tp->pdev))) {
6724                 /* All MSI supporting chips should support tagged
6725                  * status.  Assert that this is the case.
6726                  */
6727                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6728                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6729                                "Not using MSI.\n", tp->dev->name);
6730                 } else if (pci_enable_msi(tp->pdev) == 0) {
6731                         u32 msi_mode;
6732
6733                         msi_mode = tr32(MSGINT_MODE);
6734                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6735                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6736                 }
6737         }
6738         err = tg3_request_irq(tp);
6739
6740         if (err) {
6741                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6742                         pci_disable_msi(tp->pdev);
6743                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6744                 }
6745                 tg3_free_consistent(tp);
6746                 return err;
6747         }
6748
6749         tg3_full_lock(tp, 0);
6750
6751         err = tg3_init_hw(tp);
6752         if (err) {
6753                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6754                 tg3_free_rings(tp);
6755         } else {
6756                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6757                         tp->timer_offset = HZ;
6758                 else
6759                         tp->timer_offset = HZ / 10;
6760
6761                 BUG_ON(tp->timer_offset > HZ);
6762                 tp->timer_counter = tp->timer_multiplier =
6763                         (HZ / tp->timer_offset);
6764                 tp->asf_counter = tp->asf_multiplier =
6765                         ((HZ / tp->timer_offset) * 2);
6766
6767                 init_timer(&tp->timer);
6768                 tp->timer.expires = jiffies + tp->timer_offset;
6769                 tp->timer.data = (unsigned long) tp;
6770                 tp->timer.function = tg3_timer;
6771         }
6772
6773         tg3_full_unlock(tp);
6774
6775         if (err) {
6776                 free_irq(tp->pdev->irq, dev);
6777                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6778                         pci_disable_msi(tp->pdev);
6779                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6780                 }
6781                 tg3_free_consistent(tp);
6782                 return err;
6783         }
6784
6785         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6786                 err = tg3_test_msi(tp);
6787
6788                 if (err) {
6789                         tg3_full_lock(tp, 0);
6790
6791                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6792                                 pci_disable_msi(tp->pdev);
6793                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6794                         }
6795                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6796                         tg3_free_rings(tp);
6797                         tg3_free_consistent(tp);
6798
6799                         tg3_full_unlock(tp);
6800
6801                         return err;
6802                 }
6803
6804                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6805                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6806                                 u32 val = tr32(0x7c04);
6807
6808                                 tw32(0x7c04, val | (1 << 29));
6809                         }
6810                 }
6811         }
6812
6813         tg3_full_lock(tp, 0);
6814
6815         add_timer(&tp->timer);
6816         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6817         tg3_enable_ints(tp);
6818
6819         tg3_full_unlock(tp);
6820
6821         netif_start_queue(dev);
6822
6823         return 0;
6824 }
6825
6826 #if 0
6827 /*static*/ void tg3_dump_state(struct tg3 *tp)
6828 {
6829         u32 val32, val32_2, val32_3, val32_4, val32_5;
6830         u16 val16;
6831         int i;
6832
6833         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6834         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6835         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6836                val16, val32);
6837
6838         /* MAC block */
6839         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6840                tr32(MAC_MODE), tr32(MAC_STATUS));
6841         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6842                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6843         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6844                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6845         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6846                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6847
6848         /* Send data initiator control block */
6849         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6850                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6851         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6852                tr32(SNDDATAI_STATSCTRL));
6853
6854         /* Send data completion control block */
6855         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6856
6857         /* Send BD ring selector block */
6858         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6859                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6860
6861         /* Send BD initiator control block */
6862         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6863                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6864
6865         /* Send BD completion control block */
6866         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6867
6868         /* Receive list placement control block */
6869         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6870                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6871         printk("       RCVLPC_STATSCTRL[%08x]\n",
6872                tr32(RCVLPC_STATSCTRL));
6873
6874         /* Receive data and receive BD initiator control block */
6875         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6876                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6877
6878         /* Receive data completion control block */
6879         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6880                tr32(RCVDCC_MODE));
6881
6882         /* Receive BD initiator control block */
6883         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6884                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6885
6886         /* Receive BD completion control block */
6887         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6888                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6889
6890         /* Receive list selector control block */
6891         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6892                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6893
6894         /* Mbuf cluster free block */
6895         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6896                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6897
6898         /* Host coalescing control block */
6899         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6900                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6901         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6902                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6903                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6904         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6905                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6906                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6907         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6908                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6909         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6910                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6911
6912         /* Memory arbiter control block */
6913         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6914                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6915
6916         /* Buffer manager control block */
6917         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6918                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6919         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6920                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6921         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6922                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6923                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6924                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6925
6926         /* Read DMA control block */
6927         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6928                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6929
6930         /* Write DMA control block */
6931         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6932                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6933
6934         /* DMA completion block */
6935         printk("DEBUG: DMAC_MODE[%08x]\n",
6936                tr32(DMAC_MODE));
6937
6938         /* GRC block */
6939         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6940                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6941         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6942                tr32(GRC_LOCAL_CTRL));
6943
6944         /* TG3_BDINFOs */
6945         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6946                tr32(RCVDBDI_JUMBO_BD + 0x0),
6947                tr32(RCVDBDI_JUMBO_BD + 0x4),
6948                tr32(RCVDBDI_JUMBO_BD + 0x8),
6949                tr32(RCVDBDI_JUMBO_BD + 0xc));
6950         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6951                tr32(RCVDBDI_STD_BD + 0x0),
6952                tr32(RCVDBDI_STD_BD + 0x4),
6953                tr32(RCVDBDI_STD_BD + 0x8),
6954                tr32(RCVDBDI_STD_BD + 0xc));
6955         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6956                tr32(RCVDBDI_MINI_BD + 0x0),
6957                tr32(RCVDBDI_MINI_BD + 0x4),
6958                tr32(RCVDBDI_MINI_BD + 0x8),
6959                tr32(RCVDBDI_MINI_BD + 0xc));
6960
6961         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6962         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6963         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6964         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6965         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6966                val32, val32_2, val32_3, val32_4);
6967
6968         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6969         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6970         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6971         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6972         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6973                val32, val32_2, val32_3, val32_4);
6974
6975         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6976         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6977         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6978         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6979         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6980         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6981                val32, val32_2, val32_3, val32_4, val32_5);
6982
6983         /* SW status block */
6984         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6985                tp->hw_status->status,
6986                tp->hw_status->status_tag,
6987                tp->hw_status->rx_jumbo_consumer,
6988                tp->hw_status->rx_consumer,
6989                tp->hw_status->rx_mini_consumer,
6990                tp->hw_status->idx[0].rx_producer,
6991                tp->hw_status->idx[0].tx_consumer);
6992
6993         /* SW statistics block */
6994         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6995                ((u32 *)tp->hw_stats)[0],
6996                ((u32 *)tp->hw_stats)[1],
6997                ((u32 *)tp->hw_stats)[2],
6998                ((u32 *)tp->hw_stats)[3]);
6999
7000         /* Mailboxes */
7001         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7002                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7003                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7004                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7005                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7006
7007         /* NIC side send descriptors. */
7008         for (i = 0; i < 6; i++) {
7009                 unsigned long txd;
7010
7011                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7012                         + (i * sizeof(struct tg3_tx_buffer_desc));
7013                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7014                        i,
7015                        readl(txd + 0x0), readl(txd + 0x4),
7016                        readl(txd + 0x8), readl(txd + 0xc));
7017         }
7018
7019         /* NIC side RX descriptors. */
7020         for (i = 0; i < 6; i++) {
7021                 unsigned long rxd;
7022
7023                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7024                         + (i * sizeof(struct tg3_rx_buffer_desc));
7025                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7026                        i,
7027                        readl(rxd + 0x0), readl(rxd + 0x4),
7028                        readl(rxd + 0x8), readl(rxd + 0xc));
7029                 rxd += (4 * sizeof(u32));
7030                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7031                        i,
7032                        readl(rxd + 0x0), readl(rxd + 0x4),
7033                        readl(rxd + 0x8), readl(rxd + 0xc));
7034         }
7035
7036         for (i = 0; i < 6; i++) {
7037                 unsigned long rxd;
7038
7039                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7040                         + (i * sizeof(struct tg3_rx_buffer_desc));
7041                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7042                        i,
7043                        readl(rxd + 0x0), readl(rxd + 0x4),
7044                        readl(rxd + 0x8), readl(rxd + 0xc));
7045                 rxd += (4 * sizeof(u32));
7046                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7047                        i,
7048                        readl(rxd + 0x0), readl(rxd + 0x4),
7049                        readl(rxd + 0x8), readl(rxd + 0xc));
7050         }
7051 }
7052 #endif
7053
7054 static struct net_device_stats *tg3_get_stats(struct net_device *);
7055 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7056
7057 static int tg3_close(struct net_device *dev)
7058 {
7059         struct tg3 *tp = netdev_priv(dev);
7060
7061         /* Calling flush_scheduled_work() may deadlock because
7062          * linkwatch_event() may be on the workqueue and it will try to get
7063          * the rtnl_lock which we are holding.
7064          */
7065         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7066                 msleep(1);
7067
7068         netif_stop_queue(dev);
7069
7070         del_timer_sync(&tp->timer);
7071
7072         tg3_full_lock(tp, 1);
7073 #if 0
7074         tg3_dump_state(tp);
7075 #endif
7076
7077         tg3_disable_ints(tp);
7078
7079         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7080         tg3_free_rings(tp);
7081         tp->tg3_flags &=
7082                 ~(TG3_FLAG_INIT_COMPLETE |
7083                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7084
7085         tg3_full_unlock(tp);
7086
7087         free_irq(tp->pdev->irq, dev);
7088         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7089                 pci_disable_msi(tp->pdev);
7090                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7091         }
7092
7093         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7094                sizeof(tp->net_stats_prev));
7095         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7096                sizeof(tp->estats_prev));
7097
7098         tg3_free_consistent(tp);
7099
7100         tg3_set_power_state(tp, PCI_D3hot);
7101
7102         netif_carrier_off(tp->dev);
7103
7104         return 0;
7105 }
7106
7107 static inline unsigned long get_stat64(tg3_stat64_t *val)
7108 {
7109         unsigned long ret;
7110
7111 #if (BITS_PER_LONG == 32)
7112         ret = val->low;
7113 #else
7114         ret = ((u64)val->high << 32) | ((u64)val->low);
7115 #endif
7116         return ret;
7117 }
7118
7119 static unsigned long calc_crc_errors(struct tg3 *tp)
7120 {
7121         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7122
7123         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7124             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7125              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7126                 u32 val;
7127
7128                 spin_lock_bh(&tp->lock);
7129                 if (!tg3_readphy(tp, 0x1e, &val)) {
7130                         tg3_writephy(tp, 0x1e, val | 0x8000);
7131                         tg3_readphy(tp, 0x14, &val);
7132                 } else
7133                         val = 0;
7134                 spin_unlock_bh(&tp->lock);
7135
7136                 tp->phy_crc_errors += val;
7137
7138                 return tp->phy_crc_errors;
7139         }
7140
7141         return get_stat64(&hw_stats->rx_fcs_errors);
7142 }
7143
7144 #define ESTAT_ADD(member) \
7145         estats->member =        old_estats->member + \
7146                                 get_stat64(&hw_stats->member)
7147
7148 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7149 {
7150         struct tg3_ethtool_stats *estats = &tp->estats;
7151         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7152         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7153
7154         if (!hw_stats)
7155                 return old_estats;
7156
7157         ESTAT_ADD(rx_octets);
7158         ESTAT_ADD(rx_fragments);
7159         ESTAT_ADD(rx_ucast_packets);
7160         ESTAT_ADD(rx_mcast_packets);
7161         ESTAT_ADD(rx_bcast_packets);
7162         ESTAT_ADD(rx_fcs_errors);
7163         ESTAT_ADD(rx_align_errors);
7164         ESTAT_ADD(rx_xon_pause_rcvd);
7165         ESTAT_ADD(rx_xoff_pause_rcvd);
7166         ESTAT_ADD(rx_mac_ctrl_rcvd);
7167         ESTAT_ADD(rx_xoff_entered);
7168         ESTAT_ADD(rx_frame_too_long_errors);
7169         ESTAT_ADD(rx_jabbers);
7170         ESTAT_ADD(rx_undersize_packets);
7171         ESTAT_ADD(rx_in_length_errors);
7172         ESTAT_ADD(rx_out_length_errors);
7173         ESTAT_ADD(rx_64_or_less_octet_packets);
7174         ESTAT_ADD(rx_65_to_127_octet_packets);
7175         ESTAT_ADD(rx_128_to_255_octet_packets);
7176         ESTAT_ADD(rx_256_to_511_octet_packets);
7177         ESTAT_ADD(rx_512_to_1023_octet_packets);
7178         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7179         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7180         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7181         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7182         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7183
7184         ESTAT_ADD(tx_octets);
7185         ESTAT_ADD(tx_collisions);
7186         ESTAT_ADD(tx_xon_sent);
7187         ESTAT_ADD(tx_xoff_sent);
7188         ESTAT_ADD(tx_flow_control);
7189         ESTAT_ADD(tx_mac_errors);
7190         ESTAT_ADD(tx_single_collisions);
7191         ESTAT_ADD(tx_mult_collisions);
7192         ESTAT_ADD(tx_deferred);
7193         ESTAT_ADD(tx_excessive_collisions);
7194         ESTAT_ADD(tx_late_collisions);
7195         ESTAT_ADD(tx_collide_2times);
7196         ESTAT_ADD(tx_collide_3times);
7197         ESTAT_ADD(tx_collide_4times);
7198         ESTAT_ADD(tx_collide_5times);
7199         ESTAT_ADD(tx_collide_6times);
7200         ESTAT_ADD(tx_collide_7times);
7201         ESTAT_ADD(tx_collide_8times);
7202         ESTAT_ADD(tx_collide_9times);
7203         ESTAT_ADD(tx_collide_10times);
7204         ESTAT_ADD(tx_collide_11times);
7205         ESTAT_ADD(tx_collide_12times);
7206         ESTAT_ADD(tx_collide_13times);
7207         ESTAT_ADD(tx_collide_14times);
7208         ESTAT_ADD(tx_collide_15times);
7209         ESTAT_ADD(tx_ucast_packets);
7210         ESTAT_ADD(tx_mcast_packets);
7211         ESTAT_ADD(tx_bcast_packets);
7212         ESTAT_ADD(tx_carrier_sense_errors);
7213         ESTAT_ADD(tx_discards);
7214         ESTAT_ADD(tx_errors);
7215
7216         ESTAT_ADD(dma_writeq_full);
7217         ESTAT_ADD(dma_write_prioq_full);
7218         ESTAT_ADD(rxbds_empty);
7219         ESTAT_ADD(rx_discards);
7220         ESTAT_ADD(rx_errors);
7221         ESTAT_ADD(rx_threshold_hit);
7222
7223         ESTAT_ADD(dma_readq_full);
7224         ESTAT_ADD(dma_read_prioq_full);
7225         ESTAT_ADD(tx_comp_queue_full);
7226
7227         ESTAT_ADD(ring_set_send_prod_index);
7228         ESTAT_ADD(ring_status_update);
7229         ESTAT_ADD(nic_irqs);
7230         ESTAT_ADD(nic_avoided_irqs);
7231         ESTAT_ADD(nic_tx_threshold_hit);
7232
7233         return estats;
7234 }
7235
7236 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7237 {
7238         struct tg3 *tp = netdev_priv(dev);
7239         struct net_device_stats *stats = &tp->net_stats;
7240         struct net_device_stats *old_stats = &tp->net_stats_prev;
7241         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7242
7243         if (!hw_stats)
7244                 return old_stats;
7245
7246         stats->rx_packets = old_stats->rx_packets +
7247                 get_stat64(&hw_stats->rx_ucast_packets) +
7248                 get_stat64(&hw_stats->rx_mcast_packets) +
7249                 get_stat64(&hw_stats->rx_bcast_packets);
7250                 
7251         stats->tx_packets = old_stats->tx_packets +
7252                 get_stat64(&hw_stats->tx_ucast_packets) +
7253                 get_stat64(&hw_stats->tx_mcast_packets) +
7254                 get_stat64(&hw_stats->tx_bcast_packets);
7255
7256         stats->rx_bytes = old_stats->rx_bytes +
7257                 get_stat64(&hw_stats->rx_octets);
7258         stats->tx_bytes = old_stats->tx_bytes +
7259                 get_stat64(&hw_stats->tx_octets);
7260
7261         stats->rx_errors = old_stats->rx_errors +
7262                 get_stat64(&hw_stats->rx_errors);
7263         stats->tx_errors = old_stats->tx_errors +
7264                 get_stat64(&hw_stats->tx_errors) +
7265                 get_stat64(&hw_stats->tx_mac_errors) +
7266                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7267                 get_stat64(&hw_stats->tx_discards);
7268
7269         stats->multicast = old_stats->multicast +
7270                 get_stat64(&hw_stats->rx_mcast_packets);
7271         stats->collisions = old_stats->collisions +
7272                 get_stat64(&hw_stats->tx_collisions);
7273
7274         stats->rx_length_errors = old_stats->rx_length_errors +
7275                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7276                 get_stat64(&hw_stats->rx_undersize_packets);
7277
7278         stats->rx_over_errors = old_stats->rx_over_errors +
7279                 get_stat64(&hw_stats->rxbds_empty);
7280         stats->rx_frame_errors = old_stats->rx_frame_errors +
7281                 get_stat64(&hw_stats->rx_align_errors);
7282         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7283                 get_stat64(&hw_stats->tx_discards);
7284         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7285                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7286
7287         stats->rx_crc_errors = old_stats->rx_crc_errors +
7288                 calc_crc_errors(tp);
7289
7290         stats->rx_missed_errors = old_stats->rx_missed_errors +
7291                 get_stat64(&hw_stats->rx_discards);
7292
7293         return stats;
7294 }
7295
7296 static inline u32 calc_crc(unsigned char *buf, int len)
7297 {
7298         u32 reg;
7299         u32 tmp;
7300         int j, k;
7301
7302         reg = 0xffffffff;
7303
7304         for (j = 0; j < len; j++) {
7305                 reg ^= buf[j];
7306
7307                 for (k = 0; k < 8; k++) {
7308                         tmp = reg & 0x01;
7309
7310                         reg >>= 1;
7311
7312                         if (tmp) {
7313                                 reg ^= 0xedb88320;
7314                         }
7315                 }
7316         }
7317
7318         return ~reg;
7319 }
7320
7321 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7322 {
7323         /* accept or reject all multicast frames */
7324         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7325         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7326         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7327         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7328 }
7329
7330 static void __tg3_set_rx_mode(struct net_device *dev)
7331 {
7332         struct tg3 *tp = netdev_priv(dev);
7333         u32 rx_mode;
7334
7335         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7336                                   RX_MODE_KEEP_VLAN_TAG);
7337
7338         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7339          * flag clear.
7340          */
7341 #if TG3_VLAN_TAG_USED
7342         if (!tp->vlgrp &&
7343             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7344                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7345 #else
7346         /* By definition, VLAN is disabled always in this
7347          * case.
7348          */
7349         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7350                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7351 #endif
7352
7353         if (dev->flags & IFF_PROMISC) {
7354                 /* Promiscuous mode. */
7355                 rx_mode |= RX_MODE_PROMISC;
7356         } else if (dev->flags & IFF_ALLMULTI) {
7357                 /* Accept all multicast. */
7358                 tg3_set_multi (tp, 1);
7359         } else if (dev->mc_count < 1) {
7360                 /* Reject all multicast. */
7361                 tg3_set_multi (tp, 0);
7362         } else {
7363                 /* Accept one or more multicast(s). */
7364                 struct dev_mc_list *mclist;
7365                 unsigned int i;
7366                 u32 mc_filter[4] = { 0, };
7367                 u32 regidx;
7368                 u32 bit;
7369                 u32 crc;
7370
7371                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7372                      i++, mclist = mclist->next) {
7373
7374                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7375                         bit = ~crc & 0x7f;
7376                         regidx = (bit & 0x60) >> 5;
7377                         bit &= 0x1f;
7378                         mc_filter[regidx] |= (1 << bit);
7379                 }
7380
7381                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7382                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7383                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7384                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7385         }
7386
7387         if (rx_mode != tp->rx_mode) {
7388                 tp->rx_mode = rx_mode;
7389                 tw32_f(MAC_RX_MODE, rx_mode);
7390                 udelay(10);
7391         }
7392 }
7393
7394 static void tg3_set_rx_mode(struct net_device *dev)
7395 {
7396         struct tg3 *tp = netdev_priv(dev);
7397
7398         if (!netif_running(dev))
7399                 return;
7400
7401         tg3_full_lock(tp, 0);
7402         __tg3_set_rx_mode(dev);
7403         tg3_full_unlock(tp);
7404 }
7405
7406 #define TG3_REGDUMP_LEN         (32 * 1024)
7407
7408 static int tg3_get_regs_len(struct net_device *dev)
7409 {
7410         return TG3_REGDUMP_LEN;
7411 }
7412
7413 static void tg3_get_regs(struct net_device *dev,
7414                 struct ethtool_regs *regs, void *_p)
7415 {
7416         u32 *p = _p;
7417         struct tg3 *tp = netdev_priv(dev);
7418         u8 *orig_p = _p;
7419         int i;
7420
7421         regs->version = 0;
7422
7423         memset(p, 0, TG3_REGDUMP_LEN);
7424
7425         if (tp->link_config.phy_is_low_power)
7426                 return;
7427
7428         tg3_full_lock(tp, 0);
7429
7430 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7431 #define GET_REG32_LOOP(base,len)                \
7432 do {    p = (u32 *)(orig_p + (base));           \
7433         for (i = 0; i < len; i += 4)            \
7434                 __GET_REG32((base) + i);        \
7435 } while (0)
7436 #define GET_REG32_1(reg)                        \
7437 do {    p = (u32 *)(orig_p + (reg));            \
7438         __GET_REG32((reg));                     \
7439 } while (0)
7440
7441         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7442         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7443         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7444         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7445         GET_REG32_1(SNDDATAC_MODE);
7446         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7447         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7448         GET_REG32_1(SNDBDC_MODE);
7449         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7450         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7451         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7452         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7453         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7454         GET_REG32_1(RCVDCC_MODE);
7455         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7456         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7457         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7458         GET_REG32_1(MBFREE_MODE);
7459         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7460         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7461         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7462         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7463         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7464         GET_REG32_1(RX_CPU_MODE);
7465         GET_REG32_1(RX_CPU_STATE);
7466         GET_REG32_1(RX_CPU_PGMCTR);
7467         GET_REG32_1(RX_CPU_HWBKPT);
7468         GET_REG32_1(TX_CPU_MODE);
7469         GET_REG32_1(TX_CPU_STATE);
7470         GET_REG32_1(TX_CPU_PGMCTR);
7471         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7472         GET_REG32_LOOP(FTQ_RESET, 0x120);
7473         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7474         GET_REG32_1(DMAC_MODE);
7475         GET_REG32_LOOP(GRC_MODE, 0x4c);
7476         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7477                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7478
7479 #undef __GET_REG32
7480 #undef GET_REG32_LOOP
7481 #undef GET_REG32_1
7482
7483         tg3_full_unlock(tp);
7484 }
7485
7486 static int tg3_get_eeprom_len(struct net_device *dev)
7487 {
7488         struct tg3 *tp = netdev_priv(dev);
7489
7490         return tp->nvram_size;
7491 }
7492
7493 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7494 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7495
7496 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7497 {
7498         struct tg3 *tp = netdev_priv(dev);
7499         int ret;
7500         u8  *pd;
7501         u32 i, offset, len, val, b_offset, b_count;
7502
7503         if (tp->link_config.phy_is_low_power)
7504                 return -EAGAIN;
7505
7506         offset = eeprom->offset;
7507         len = eeprom->len;
7508         eeprom->len = 0;
7509
7510         eeprom->magic = TG3_EEPROM_MAGIC;
7511
7512         if (offset & 3) {
7513                 /* adjustments to start on required 4 byte boundary */
7514                 b_offset = offset & 3;
7515                 b_count = 4 - b_offset;
7516                 if (b_count > len) {
7517                         /* i.e. offset=1 len=2 */
7518                         b_count = len;
7519                 }
7520                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7521                 if (ret)
7522                         return ret;
7523                 val = cpu_to_le32(val);
7524                 memcpy(data, ((char*)&val) + b_offset, b_count);
7525                 len -= b_count;
7526                 offset += b_count;
7527                 eeprom->len += b_count;
7528         }
7529
7530         /* read bytes upto the last 4 byte boundary */
7531         pd = &data[eeprom->len];
7532         for (i = 0; i < (len - (len & 3)); i += 4) {
7533                 ret = tg3_nvram_read(tp, offset + i, &val);
7534                 if (ret) {
7535                         eeprom->len += i;
7536                         return ret;
7537                 }
7538                 val = cpu_to_le32(val);
7539                 memcpy(pd + i, &val, 4);
7540         }
7541         eeprom->len += i;
7542
7543         if (len & 3) {
7544                 /* read last bytes not ending on 4 byte boundary */
7545                 pd = &data[eeprom->len];
7546                 b_count = len & 3;
7547                 b_offset = offset + len - b_count;
7548                 ret = tg3_nvram_read(tp, b_offset, &val);
7549                 if (ret)
7550                         return ret;
7551                 val = cpu_to_le32(val);
7552                 memcpy(pd, ((char*)&val), b_count);
7553                 eeprom->len += b_count;
7554         }
7555         return 0;
7556 }
7557
7558 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7559
7560 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7561 {
7562         struct tg3 *tp = netdev_priv(dev);
7563         int ret;
7564         u32 offset, len, b_offset, odd_len, start, end;
7565         u8 *buf;
7566
7567         if (tp->link_config.phy_is_low_power)
7568                 return -EAGAIN;
7569
7570         if (eeprom->magic != TG3_EEPROM_MAGIC)
7571                 return -EINVAL;
7572
7573         offset = eeprom->offset;
7574         len = eeprom->len;
7575
7576         if ((b_offset = (offset & 3))) {
7577                 /* adjustments to start on required 4 byte boundary */
7578                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7579                 if (ret)
7580                         return ret;
7581                 start = cpu_to_le32(start);
7582                 len += b_offset;
7583                 offset &= ~3;
7584                 if (len < 4)
7585                         len = 4;
7586         }
7587
7588         odd_len = 0;
7589         if (len & 3) {
7590                 /* adjustments to end on required 4 byte boundary */
7591                 odd_len = 1;
7592                 len = (len + 3) & ~3;
7593                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7594                 if (ret)
7595                         return ret;
7596                 end = cpu_to_le32(end);
7597         }
7598
7599         buf = data;
7600         if (b_offset || odd_len) {
7601                 buf = kmalloc(len, GFP_KERNEL);
7602                 if (buf == 0)
7603                         return -ENOMEM;
7604                 if (b_offset)
7605                         memcpy(buf, &start, 4);
7606                 if (odd_len)
7607                         memcpy(buf+len-4, &end, 4);
7608                 memcpy(buf + b_offset, data, eeprom->len);
7609         }
7610
7611         ret = tg3_nvram_write_block(tp, offset, len, buf);
7612
7613         if (buf != data)
7614                 kfree(buf);
7615
7616         return ret;
7617 }
7618
7619 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7620 {
7621         struct tg3 *tp = netdev_priv(dev);
7622   
7623         cmd->supported = (SUPPORTED_Autoneg);
7624
7625         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7626                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7627                                    SUPPORTED_1000baseT_Full);
7628
7629         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7630                 cmd->supported |= (SUPPORTED_100baseT_Half |
7631                                   SUPPORTED_100baseT_Full |
7632                                   SUPPORTED_10baseT_Half |
7633                                   SUPPORTED_10baseT_Full |
7634                                   SUPPORTED_MII);
7635         else
7636                 cmd->supported |= SUPPORTED_FIBRE;
7637   
7638         cmd->advertising = tp->link_config.advertising;
7639         if (netif_running(dev)) {
7640                 cmd->speed = tp->link_config.active_speed;
7641                 cmd->duplex = tp->link_config.active_duplex;
7642         }
7643         cmd->port = 0;
7644         cmd->phy_address = PHY_ADDR;
7645         cmd->transceiver = 0;
7646         cmd->autoneg = tp->link_config.autoneg;
7647         cmd->maxtxpkt = 0;
7648         cmd->maxrxpkt = 0;
7649         return 0;
7650 }
7651   
7652 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7653 {
7654         struct tg3 *tp = netdev_priv(dev);
7655   
7656         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7657                 /* These are the only valid advertisement bits allowed.  */
7658                 if (cmd->autoneg == AUTONEG_ENABLE &&
7659                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7660                                           ADVERTISED_1000baseT_Full |
7661                                           ADVERTISED_Autoneg |
7662                                           ADVERTISED_FIBRE)))
7663                         return -EINVAL;
7664                 /* Fiber can only do SPEED_1000.  */
7665                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7666                          (cmd->speed != SPEED_1000))
7667                         return -EINVAL;
7668         /* Copper cannot force SPEED_1000.  */
7669         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7670                    (cmd->speed == SPEED_1000))
7671                 return -EINVAL;
7672         else if ((cmd->speed == SPEED_1000) &&
7673                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7674                 return -EINVAL;
7675
7676         tg3_full_lock(tp, 0);
7677
7678         tp->link_config.autoneg = cmd->autoneg;
7679         if (cmd->autoneg == AUTONEG_ENABLE) {
7680                 tp->link_config.advertising = cmd->advertising;
7681                 tp->link_config.speed = SPEED_INVALID;
7682                 tp->link_config.duplex = DUPLEX_INVALID;
7683         } else {
7684                 tp->link_config.advertising = 0;
7685                 tp->link_config.speed = cmd->speed;
7686                 tp->link_config.duplex = cmd->duplex;
7687         }
7688   
7689         if (netif_running(dev))
7690                 tg3_setup_phy(tp, 1);
7691
7692         tg3_full_unlock(tp);
7693   
7694         return 0;
7695 }
7696   
7697 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7698 {
7699         struct tg3 *tp = netdev_priv(dev);
7700   
7701         strcpy(info->driver, DRV_MODULE_NAME);
7702         strcpy(info->version, DRV_MODULE_VERSION);
7703         strcpy(info->fw_version, tp->fw_ver);
7704         strcpy(info->bus_info, pci_name(tp->pdev));
7705 }
7706   
7707 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7708 {
7709         struct tg3 *tp = netdev_priv(dev);
7710   
7711         wol->supported = WAKE_MAGIC;
7712         wol->wolopts = 0;
7713         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7714                 wol->wolopts = WAKE_MAGIC;
7715         memset(&wol->sopass, 0, sizeof(wol->sopass));
7716 }
7717   
7718 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7719 {
7720         struct tg3 *tp = netdev_priv(dev);
7721   
7722         if (wol->wolopts & ~WAKE_MAGIC)
7723                 return -EINVAL;
7724         if ((wol->wolopts & WAKE_MAGIC) &&
7725             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7726             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7727                 return -EINVAL;
7728   
7729         spin_lock_bh(&tp->lock);
7730         if (wol->wolopts & WAKE_MAGIC)
7731                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7732         else
7733                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7734         spin_unlock_bh(&tp->lock);
7735   
7736         return 0;
7737 }
7738   
7739 static u32 tg3_get_msglevel(struct net_device *dev)
7740 {
7741         struct tg3 *tp = netdev_priv(dev);
7742         return tp->msg_enable;
7743 }
7744   
7745 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7746 {
7747         struct tg3 *tp = netdev_priv(dev);
7748         tp->msg_enable = value;
7749 }
7750   
7751 #if TG3_TSO_SUPPORT != 0
7752 static int tg3_set_tso(struct net_device *dev, u32 value)
7753 {
7754         struct tg3 *tp = netdev_priv(dev);
7755
7756         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7757                 if (value)
7758                         return -EINVAL;
7759                 return 0;
7760         }
7761         return ethtool_op_set_tso(dev, value);
7762 }
7763 #endif
7764   
7765 static int tg3_nway_reset(struct net_device *dev)
7766 {
7767         struct tg3 *tp = netdev_priv(dev);
7768         u32 bmcr;
7769         int r;
7770   
7771         if (!netif_running(dev))
7772                 return -EAGAIN;
7773
7774         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7775                 return -EINVAL;
7776
7777         spin_lock_bh(&tp->lock);
7778         r = -EINVAL;
7779         tg3_readphy(tp, MII_BMCR, &bmcr);
7780         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7781             ((bmcr & BMCR_ANENABLE) ||
7782              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7783                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7784                                            BMCR_ANENABLE);
7785                 r = 0;
7786         }
7787         spin_unlock_bh(&tp->lock);
7788   
7789         return r;
7790 }
7791   
7792 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7793 {
7794         struct tg3 *tp = netdev_priv(dev);
7795   
7796         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7797         ering->rx_mini_max_pending = 0;
7798         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7799                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7800         else
7801                 ering->rx_jumbo_max_pending = 0;
7802
7803         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7804
7805         ering->rx_pending = tp->rx_pending;
7806         ering->rx_mini_pending = 0;
7807         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7808                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7809         else
7810                 ering->rx_jumbo_pending = 0;
7811
7812         ering->tx_pending = tp->tx_pending;
7813 }
7814   
7815 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7816 {
7817         struct tg3 *tp = netdev_priv(dev);
7818         int irq_sync = 0;
7819   
7820         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7821             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7822             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7823                 return -EINVAL;
7824   
7825         if (netif_running(dev)) {
7826                 tg3_netif_stop(tp);
7827                 irq_sync = 1;
7828         }
7829
7830         tg3_full_lock(tp, irq_sync);
7831   
7832         tp->rx_pending = ering->rx_pending;
7833
7834         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7835             tp->rx_pending > 63)
7836                 tp->rx_pending = 63;
7837         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7838         tp->tx_pending = ering->tx_pending;
7839
7840         if (netif_running(dev)) {
7841                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7842                 tg3_init_hw(tp);
7843                 tg3_netif_start(tp);
7844         }
7845
7846         tg3_full_unlock(tp);
7847   
7848         return 0;
7849 }
7850   
7851 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7852 {
7853         struct tg3 *tp = netdev_priv(dev);
7854   
7855         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7856         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7857         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7858 }
7859   
7860 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7861 {
7862         struct tg3 *tp = netdev_priv(dev);
7863         int irq_sync = 0;
7864   
7865         if (netif_running(dev)) {
7866                 tg3_netif_stop(tp);
7867                 irq_sync = 1;
7868         }
7869
7870         tg3_full_lock(tp, irq_sync);
7871
7872         if (epause->autoneg)
7873                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7874         else
7875                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7876         if (epause->rx_pause)
7877                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7878         else
7879                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7880         if (epause->tx_pause)
7881                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7882         else
7883                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7884
7885         if (netif_running(dev)) {
7886                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7887                 tg3_init_hw(tp);
7888                 tg3_netif_start(tp);
7889         }
7890
7891         tg3_full_unlock(tp);
7892   
7893         return 0;
7894 }
7895   
7896 static u32 tg3_get_rx_csum(struct net_device *dev)
7897 {
7898         struct tg3 *tp = netdev_priv(dev);
7899         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7900 }
7901   
7902 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7903 {
7904         struct tg3 *tp = netdev_priv(dev);
7905   
7906         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7907                 if (data != 0)
7908                         return -EINVAL;
7909                 return 0;
7910         }
7911   
7912         spin_lock_bh(&tp->lock);
7913         if (data)
7914                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7915         else
7916                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7917         spin_unlock_bh(&tp->lock);
7918   
7919         return 0;
7920 }
7921   
7922 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7923 {
7924         struct tg3 *tp = netdev_priv(dev);
7925   
7926         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7927                 if (data != 0)
7928                         return -EINVAL;
7929                 return 0;
7930         }
7931   
7932         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7933             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7934                 ethtool_op_set_tx_hw_csum(dev, data);
7935         else
7936                 ethtool_op_set_tx_csum(dev, data);
7937
7938         return 0;
7939 }
7940
7941 static int tg3_get_stats_count (struct net_device *dev)
7942 {
7943         return TG3_NUM_STATS;
7944 }
7945
7946 static int tg3_get_test_count (struct net_device *dev)
7947 {
7948         return TG3_NUM_TEST;
7949 }
7950
7951 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7952 {
7953         switch (stringset) {
7954         case ETH_SS_STATS:
7955                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7956                 break;
7957         case ETH_SS_TEST:
7958                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7959                 break;
7960         default:
7961                 WARN_ON(1);     /* we need a WARN() */
7962                 break;
7963         }
7964 }
7965
7966 static int tg3_phys_id(struct net_device *dev, u32 data)
7967 {
7968         struct tg3 *tp = netdev_priv(dev);
7969         int i;
7970
7971         if (!netif_running(tp->dev))
7972                 return -EAGAIN;
7973
7974         if (data == 0)
7975                 data = 2;
7976
7977         for (i = 0; i < (data * 2); i++) {
7978                 if ((i % 2) == 0)
7979                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7980                                            LED_CTRL_1000MBPS_ON |
7981                                            LED_CTRL_100MBPS_ON |
7982                                            LED_CTRL_10MBPS_ON |
7983                                            LED_CTRL_TRAFFIC_OVERRIDE |
7984                                            LED_CTRL_TRAFFIC_BLINK |
7985                                            LED_CTRL_TRAFFIC_LED);
7986         
7987                 else
7988                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7989                                            LED_CTRL_TRAFFIC_OVERRIDE);
7990
7991                 if (msleep_interruptible(500))
7992                         break;
7993         }
7994         tw32(MAC_LED_CTRL, tp->led_ctrl);
7995         return 0;
7996 }
7997
7998 static void tg3_get_ethtool_stats (struct net_device *dev,
7999                                    struct ethtool_stats *estats, u64 *tmp_stats)
8000 {
8001         struct tg3 *tp = netdev_priv(dev);
8002         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8003 }
8004
8005 #define NVRAM_TEST_SIZE 0x100
8006 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8007
8008 static int tg3_test_nvram(struct tg3 *tp)
8009 {
8010         u32 *buf, csum, magic;
8011         int i, j, err = 0, size;
8012
8013         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8014                 return -EIO;
8015
8016         if (magic == TG3_EEPROM_MAGIC)
8017                 size = NVRAM_TEST_SIZE;
8018         else if ((magic & 0xff000000) == 0xa5000000) {
8019                 if ((magic & 0xe00000) == 0x200000)
8020                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8021                 else
8022                         return 0;
8023         } else
8024                 return -EIO;
8025
8026         buf = kmalloc(size, GFP_KERNEL);
8027         if (buf == NULL)
8028                 return -ENOMEM;
8029
8030         err = -EIO;
8031         for (i = 0, j = 0; i < size; i += 4, j++) {
8032                 u32 val;
8033
8034                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8035                         break;
8036                 buf[j] = cpu_to_le32(val);
8037         }
8038         if (i < size)
8039                 goto out;
8040
8041         /* Selfboot format */
8042         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8043                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8044
8045                 for (i = 0; i < size; i++)
8046                         csum8 += buf8[i];
8047
8048                 if (csum8 == 0) {
8049                         err = 0;
8050                         goto out;
8051                 }
8052
8053                 err = -EIO;
8054                 goto out;
8055         }
8056
8057         /* Bootstrap checksum at offset 0x10 */
8058         csum = calc_crc((unsigned char *) buf, 0x10);
8059         if(csum != cpu_to_le32(buf[0x10/4]))
8060                 goto out;
8061
8062         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8063         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8064         if (csum != cpu_to_le32(buf[0xfc/4]))
8065                  goto out;
8066
8067         err = 0;
8068
8069 out:
8070         kfree(buf);
8071         return err;
8072 }
8073
8074 #define TG3_SERDES_TIMEOUT_SEC  2
8075 #define TG3_COPPER_TIMEOUT_SEC  6
8076
8077 static int tg3_test_link(struct tg3 *tp)
8078 {
8079         int i, max;
8080
8081         if (!netif_running(tp->dev))
8082                 return -ENODEV;
8083
8084         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8085                 max = TG3_SERDES_TIMEOUT_SEC;
8086         else
8087                 max = TG3_COPPER_TIMEOUT_SEC;
8088
8089         for (i = 0; i < max; i++) {
8090                 if (netif_carrier_ok(tp->dev))
8091                         return 0;
8092
8093                 if (msleep_interruptible(1000))
8094                         break;
8095         }
8096
8097         return -EIO;
8098 }
8099
8100 /* Only test the commonly used registers */
8101 static int tg3_test_registers(struct tg3 *tp)
8102 {
8103         int i, is_5705;
8104         u32 offset, read_mask, write_mask, val, save_val, read_val;
8105         static struct {
8106                 u16 offset;
8107                 u16 flags;
8108 #define TG3_FL_5705     0x1
8109 #define TG3_FL_NOT_5705 0x2
8110 #define TG3_FL_NOT_5788 0x4
8111                 u32 read_mask;
8112                 u32 write_mask;
8113         } reg_tbl[] = {
8114                 /* MAC Control Registers */
8115                 { MAC_MODE, TG3_FL_NOT_5705,
8116                         0x00000000, 0x00ef6f8c },
8117                 { MAC_MODE, TG3_FL_5705,
8118                         0x00000000, 0x01ef6b8c },
8119                 { MAC_STATUS, TG3_FL_NOT_5705,
8120                         0x03800107, 0x00000000 },
8121                 { MAC_STATUS, TG3_FL_5705,
8122                         0x03800100, 0x00000000 },
8123                 { MAC_ADDR_0_HIGH, 0x0000,
8124                         0x00000000, 0x0000ffff },
8125                 { MAC_ADDR_0_LOW, 0x0000,
8126                         0x00000000, 0xffffffff },
8127                 { MAC_RX_MTU_SIZE, 0x0000,
8128                         0x00000000, 0x0000ffff },
8129                 { MAC_TX_MODE, 0x0000,
8130                         0x00000000, 0x00000070 },
8131                 { MAC_TX_LENGTHS, 0x0000,
8132                         0x00000000, 0x00003fff },
8133                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8134                         0x00000000, 0x000007fc },
8135                 { MAC_RX_MODE, TG3_FL_5705,
8136                         0x00000000, 0x000007dc },
8137                 { MAC_HASH_REG_0, 0x0000,
8138                         0x00000000, 0xffffffff },
8139                 { MAC_HASH_REG_1, 0x0000,
8140                         0x00000000, 0xffffffff },
8141                 { MAC_HASH_REG_2, 0x0000,
8142                         0x00000000, 0xffffffff },
8143                 { MAC_HASH_REG_3, 0x0000,
8144                         0x00000000, 0xffffffff },
8145
8146                 /* Receive Data and Receive BD Initiator Control Registers. */
8147                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8148                         0x00000000, 0xffffffff },
8149                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8150                         0x00000000, 0xffffffff },
8151                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8152                         0x00000000, 0x00000003 },
8153                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8154                         0x00000000, 0xffffffff },
8155                 { RCVDBDI_STD_BD+0, 0x0000,
8156                         0x00000000, 0xffffffff },
8157                 { RCVDBDI_STD_BD+4, 0x0000,
8158                         0x00000000, 0xffffffff },
8159                 { RCVDBDI_STD_BD+8, 0x0000,
8160                         0x00000000, 0xffff0002 },
8161                 { RCVDBDI_STD_BD+0xc, 0x0000,
8162                         0x00000000, 0xffffffff },
8163         
8164                 /* Receive BD Initiator Control Registers. */
8165                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8166                         0x00000000, 0xffffffff },
8167                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8168                         0x00000000, 0x000003ff },
8169                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8170                         0x00000000, 0xffffffff },
8171         
8172                 /* Host Coalescing Control Registers. */
8173                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8174                         0x00000000, 0x00000004 },
8175                 { HOSTCC_MODE, TG3_FL_5705,
8176                         0x00000000, 0x000000f6 },
8177                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8178                         0x00000000, 0xffffffff },
8179                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8180                         0x00000000, 0x000003ff },
8181                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8182                         0x00000000, 0xffffffff },
8183                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8184                         0x00000000, 0x000003ff },
8185                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8186                         0x00000000, 0xffffffff },
8187                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8188                         0x00000000, 0x000000ff },
8189                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8190                         0x00000000, 0xffffffff },
8191                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8192                         0x00000000, 0x000000ff },
8193                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8194                         0x00000000, 0xffffffff },
8195                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8196                         0x00000000, 0xffffffff },
8197                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8198                         0x00000000, 0xffffffff },
8199                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8200                         0x00000000, 0x000000ff },
8201                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8202                         0x00000000, 0xffffffff },
8203                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8204                         0x00000000, 0x000000ff },
8205                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8206                         0x00000000, 0xffffffff },
8207                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8208                         0x00000000, 0xffffffff },
8209                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8210                         0x00000000, 0xffffffff },
8211                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8212                         0x00000000, 0xffffffff },
8213                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8214                         0x00000000, 0xffffffff },
8215                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8216                         0xffffffff, 0x00000000 },
8217                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8218                         0xffffffff, 0x00000000 },
8219
8220                 /* Buffer Manager Control Registers. */
8221                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8222                         0x00000000, 0x007fff80 },
8223                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8224                         0x00000000, 0x007fffff },
8225                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8226                         0x00000000, 0x0000003f },
8227                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8228                         0x00000000, 0x000001ff },
8229                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8230                         0x00000000, 0x000001ff },
8231                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8232                         0xffffffff, 0x00000000 },
8233                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8234                         0xffffffff, 0x00000000 },
8235         
8236                 /* Mailbox Registers */
8237                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8238                         0x00000000, 0x000001ff },
8239                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8240                         0x00000000, 0x000001ff },
8241                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8242                         0x00000000, 0x000007ff },
8243                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8244                         0x00000000, 0x000001ff },
8245
8246                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8247         };
8248
8249         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8250                 is_5705 = 1;
8251         else
8252                 is_5705 = 0;
8253
8254         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8255                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8256                         continue;
8257
8258                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8259                         continue;
8260
8261                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8262                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8263                         continue;
8264
8265                 offset = (u32) reg_tbl[i].offset;
8266                 read_mask = reg_tbl[i].read_mask;
8267                 write_mask = reg_tbl[i].write_mask;
8268
8269                 /* Save the original register content */
8270                 save_val = tr32(offset);
8271
8272                 /* Determine the read-only value. */
8273                 read_val = save_val & read_mask;
8274
8275                 /* Write zero to the register, then make sure the read-only bits
8276                  * are not changed and the read/write bits are all zeros.
8277                  */
8278                 tw32(offset, 0);
8279
8280                 val = tr32(offset);
8281
8282                 /* Test the read-only and read/write bits. */
8283                 if (((val & read_mask) != read_val) || (val & write_mask))
8284                         goto out;
8285
8286                 /* Write ones to all the bits defined by RdMask and WrMask, then
8287                  * make sure the read-only bits are not changed and the
8288                  * read/write bits are all ones.
8289                  */
8290                 tw32(offset, read_mask | write_mask);
8291
8292                 val = tr32(offset);
8293
8294                 /* Test the read-only bits. */
8295                 if ((val & read_mask) != read_val)
8296                         goto out;
8297
8298                 /* Test the read/write bits. */
8299                 if ((val & write_mask) != write_mask)
8300                         goto out;
8301
8302                 tw32(offset, save_val);
8303         }
8304
8305         return 0;
8306
8307 out:
8308         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8309         tw32(offset, save_val);
8310         return -EIO;
8311 }
8312
8313 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8314 {
8315         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8316         int i;
8317         u32 j;
8318
8319         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8320                 for (j = 0; j < len; j += 4) {
8321                         u32 val;
8322
8323                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8324                         tg3_read_mem(tp, offset + j, &val);
8325                         if (val != test_pattern[i])
8326                                 return -EIO;
8327                 }
8328         }
8329         return 0;
8330 }
8331
8332 static int tg3_test_memory(struct tg3 *tp)
8333 {
8334         static struct mem_entry {
8335                 u32 offset;
8336                 u32 len;
8337         } mem_tbl_570x[] = {
8338                 { 0x00000000, 0x00b50},
8339                 { 0x00002000, 0x1c000},
8340                 { 0xffffffff, 0x00000}
8341         }, mem_tbl_5705[] = {
8342                 { 0x00000100, 0x0000c},
8343                 { 0x00000200, 0x00008},
8344                 { 0x00004000, 0x00800},
8345                 { 0x00006000, 0x01000},
8346                 { 0x00008000, 0x02000},
8347                 { 0x00010000, 0x0e000},
8348                 { 0xffffffff, 0x00000}
8349         }, mem_tbl_5755[] = {
8350                 { 0x00000200, 0x00008},
8351                 { 0x00004000, 0x00800},
8352                 { 0x00006000, 0x00800},
8353                 { 0x00008000, 0x02000},
8354                 { 0x00010000, 0x0c000},
8355                 { 0xffffffff, 0x00000}
8356         };
8357         struct mem_entry *mem_tbl;
8358         int err = 0;
8359         int i;
8360
8361         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8362                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8363                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8364                         mem_tbl = mem_tbl_5755;
8365                 else
8366                         mem_tbl = mem_tbl_5705;
8367         } else
8368                 mem_tbl = mem_tbl_570x;
8369
8370         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8371                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8372                     mem_tbl[i].len)) != 0)
8373                         break;
8374         }
8375         
8376         return err;
8377 }
8378
8379 #define TG3_MAC_LOOPBACK        0
8380 #define TG3_PHY_LOOPBACK        1
8381
8382 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8383 {
8384         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8385         u32 desc_idx;
8386         struct sk_buff *skb, *rx_skb;
8387         u8 *tx_data;
8388         dma_addr_t map;
8389         int num_pkts, tx_len, rx_len, i, err;
8390         struct tg3_rx_buffer_desc *desc;
8391
8392         if (loopback_mode == TG3_MAC_LOOPBACK) {
8393                 /* HW errata - mac loopback fails in some cases on 5780.
8394                  * Normal traffic and PHY loopback are not affected by
8395                  * errata.
8396                  */
8397                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8398                         return 0;
8399
8400                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8401                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8402                            MAC_MODE_PORT_MODE_GMII;
8403                 tw32(MAC_MODE, mac_mode);
8404         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8405                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8406                                            BMCR_SPEED1000);
8407                 udelay(40);
8408                 /* reset to prevent losing 1st rx packet intermittently */
8409                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8410                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8411                         udelay(10);
8412                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8413                 }
8414                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8415                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8416                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8417                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8418                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8419                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8420                 }
8421                 tw32(MAC_MODE, mac_mode);
8422         }
8423         else
8424                 return -EINVAL;
8425
8426         err = -EIO;
8427
8428         tx_len = 1514;
8429         skb = dev_alloc_skb(tx_len);
8430         tx_data = skb_put(skb, tx_len);
8431         memcpy(tx_data, tp->dev->dev_addr, 6);
8432         memset(tx_data + 6, 0x0, 8);
8433
8434         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8435
8436         for (i = 14; i < tx_len; i++)
8437                 tx_data[i] = (u8) (i & 0xff);
8438
8439         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8440
8441         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8442              HOSTCC_MODE_NOW);
8443
8444         udelay(10);
8445
8446         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8447
8448         num_pkts = 0;
8449
8450         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8451
8452         tp->tx_prod++;
8453         num_pkts++;
8454
8455         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8456                      tp->tx_prod);
8457         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8458
8459         udelay(10);
8460
8461         for (i = 0; i < 10; i++) {
8462                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8463                        HOSTCC_MODE_NOW);
8464
8465                 udelay(10);
8466
8467                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8468                 rx_idx = tp->hw_status->idx[0].rx_producer;
8469                 if ((tx_idx == tp->tx_prod) &&
8470                     (rx_idx == (rx_start_idx + num_pkts)))
8471                         break;
8472         }
8473
8474         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8475         dev_kfree_skb(skb);
8476
8477         if (tx_idx != tp->tx_prod)
8478                 goto out;
8479
8480         if (rx_idx != rx_start_idx + num_pkts)
8481                 goto out;
8482
8483         desc = &tp->rx_rcb[rx_start_idx];
8484         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8485         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8486         if (opaque_key != RXD_OPAQUE_RING_STD)
8487                 goto out;
8488
8489         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8490             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8491                 goto out;
8492
8493         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8494         if (rx_len != tx_len)
8495                 goto out;
8496
8497         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8498
8499         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8500         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8501
8502         for (i = 14; i < tx_len; i++) {
8503                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8504                         goto out;
8505         }
8506         err = 0;
8507         
8508         /* tg3_free_rings will unmap and free the rx_skb */
8509 out:
8510         return err;
8511 }
8512
8513 #define TG3_MAC_LOOPBACK_FAILED         1
8514 #define TG3_PHY_LOOPBACK_FAILED         2
8515 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8516                                          TG3_PHY_LOOPBACK_FAILED)
8517
8518 static int tg3_test_loopback(struct tg3 *tp)
8519 {
8520         int err = 0;
8521
8522         if (!netif_running(tp->dev))
8523                 return TG3_LOOPBACK_FAILED;
8524
8525         tg3_reset_hw(tp);
8526
8527         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8528                 err |= TG3_MAC_LOOPBACK_FAILED;
8529         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8530                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8531                         err |= TG3_PHY_LOOPBACK_FAILED;
8532         }
8533
8534         return err;
8535 }
8536
8537 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8538                           u64 *data)
8539 {
8540         struct tg3 *tp = netdev_priv(dev);
8541
8542         if (tp->link_config.phy_is_low_power)
8543                 tg3_set_power_state(tp, PCI_D0);
8544
8545         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8546
8547         if (tg3_test_nvram(tp) != 0) {
8548                 etest->flags |= ETH_TEST_FL_FAILED;
8549                 data[0] = 1;
8550         }
8551         if (tg3_test_link(tp) != 0) {
8552                 etest->flags |= ETH_TEST_FL_FAILED;
8553                 data[1] = 1;
8554         }
8555         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8556                 int err, irq_sync = 0;
8557
8558                 if (netif_running(dev)) {
8559                         tg3_netif_stop(tp);
8560                         irq_sync = 1;
8561                 }
8562
8563                 tg3_full_lock(tp, irq_sync);
8564
8565                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8566                 err = tg3_nvram_lock(tp);
8567                 tg3_halt_cpu(tp, RX_CPU_BASE);
8568                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8569                         tg3_halt_cpu(tp, TX_CPU_BASE);
8570                 if (!err)
8571                         tg3_nvram_unlock(tp);
8572
8573                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8574                         tg3_phy_reset(tp);
8575
8576                 if (tg3_test_registers(tp) != 0) {
8577                         etest->flags |= ETH_TEST_FL_FAILED;
8578                         data[2] = 1;
8579                 }
8580                 if (tg3_test_memory(tp) != 0) {
8581                         etest->flags |= ETH_TEST_FL_FAILED;
8582                         data[3] = 1;
8583                 }
8584                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8585                         etest->flags |= ETH_TEST_FL_FAILED;
8586
8587                 tg3_full_unlock(tp);
8588
8589                 if (tg3_test_interrupt(tp) != 0) {
8590                         etest->flags |= ETH_TEST_FL_FAILED;
8591                         data[5] = 1;
8592                 }
8593
8594                 tg3_full_lock(tp, 0);
8595
8596                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8597                 if (netif_running(dev)) {
8598                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8599                         tg3_init_hw(tp);
8600                         tg3_netif_start(tp);
8601                 }
8602
8603                 tg3_full_unlock(tp);
8604         }
8605         if (tp->link_config.phy_is_low_power)
8606                 tg3_set_power_state(tp, PCI_D3hot);
8607
8608 }
8609
8610 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8611 {
8612         struct mii_ioctl_data *data = if_mii(ifr);
8613         struct tg3 *tp = netdev_priv(dev);
8614         int err;
8615
8616         switch(cmd) {
8617         case SIOCGMIIPHY:
8618                 data->phy_id = PHY_ADDR;
8619
8620                 /* fallthru */
8621         case SIOCGMIIREG: {
8622                 u32 mii_regval;
8623
8624                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8625                         break;                  /* We have no PHY */
8626
8627                 if (tp->link_config.phy_is_low_power)
8628                         return -EAGAIN;
8629
8630                 spin_lock_bh(&tp->lock);
8631                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8632                 spin_unlock_bh(&tp->lock);
8633
8634                 data->val_out = mii_regval;
8635
8636                 return err;
8637         }
8638
8639         case SIOCSMIIREG:
8640                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8641                         break;                  /* We have no PHY */
8642
8643                 if (!capable(CAP_NET_ADMIN))
8644                         return -EPERM;
8645
8646                 if (tp->link_config.phy_is_low_power)
8647                         return -EAGAIN;
8648
8649                 spin_lock_bh(&tp->lock);
8650                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8651                 spin_unlock_bh(&tp->lock);
8652
8653                 return err;
8654
8655         default:
8656                 /* do nothing */
8657                 break;
8658         }
8659         return -EOPNOTSUPP;
8660 }
8661
8662 #if TG3_VLAN_TAG_USED
8663 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8664 {
8665         struct tg3 *tp = netdev_priv(dev);
8666
8667         tg3_full_lock(tp, 0);
8668
8669         tp->vlgrp = grp;
8670
8671         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8672         __tg3_set_rx_mode(dev);
8673
8674         tg3_full_unlock(tp);
8675 }
8676
8677 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8678 {
8679         struct tg3 *tp = netdev_priv(dev);
8680
8681         tg3_full_lock(tp, 0);
8682         if (tp->vlgrp)
8683                 tp->vlgrp->vlan_devices[vid] = NULL;
8684         tg3_full_unlock(tp);
8685 }
8686 #endif
8687
8688 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8689 {
8690         struct tg3 *tp = netdev_priv(dev);
8691
8692         memcpy(ec, &tp->coal, sizeof(*ec));
8693         return 0;
8694 }
8695
8696 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8697 {
8698         struct tg3 *tp = netdev_priv(dev);
8699         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8700         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8701
8702         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8703                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8704                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8705                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8706                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8707         }
8708
8709         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8710             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8711             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8712             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8713             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8714             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8715             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8716             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8717             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8718             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8719                 return -EINVAL;
8720
8721         /* No rx interrupts will be generated if both are zero */
8722         if ((ec->rx_coalesce_usecs == 0) &&
8723             (ec->rx_max_coalesced_frames == 0))
8724                 return -EINVAL;
8725
8726         /* No tx interrupts will be generated if both are zero */
8727         if ((ec->tx_coalesce_usecs == 0) &&
8728             (ec->tx_max_coalesced_frames == 0))
8729                 return -EINVAL;
8730
8731         /* Only copy relevant parameters, ignore all others. */
8732         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8733         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8734         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8735         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8736         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8737         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8738         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8739         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8740         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8741
8742         if (netif_running(dev)) {
8743                 tg3_full_lock(tp, 0);
8744                 __tg3_set_coalesce(tp, &tp->coal);
8745                 tg3_full_unlock(tp);
8746         }
8747         return 0;
8748 }
8749
8750 static struct ethtool_ops tg3_ethtool_ops = {
8751         .get_settings           = tg3_get_settings,
8752         .set_settings           = tg3_set_settings,
8753         .get_drvinfo            = tg3_get_drvinfo,
8754         .get_regs_len           = tg3_get_regs_len,
8755         .get_regs               = tg3_get_regs,
8756         .get_wol                = tg3_get_wol,
8757         .set_wol                = tg3_set_wol,
8758         .get_msglevel           = tg3_get_msglevel,
8759         .set_msglevel           = tg3_set_msglevel,
8760         .nway_reset             = tg3_nway_reset,
8761         .get_link               = ethtool_op_get_link,
8762         .get_eeprom_len         = tg3_get_eeprom_len,
8763         .get_eeprom             = tg3_get_eeprom,
8764         .set_eeprom             = tg3_set_eeprom,
8765         .get_ringparam          = tg3_get_ringparam,
8766         .set_ringparam          = tg3_set_ringparam,
8767         .get_pauseparam         = tg3_get_pauseparam,
8768         .set_pauseparam         = tg3_set_pauseparam,
8769         .get_rx_csum            = tg3_get_rx_csum,
8770         .set_rx_csum            = tg3_set_rx_csum,
8771         .get_tx_csum            = ethtool_op_get_tx_csum,
8772         .set_tx_csum            = tg3_set_tx_csum,
8773         .get_sg                 = ethtool_op_get_sg,
8774         .set_sg                 = ethtool_op_set_sg,
8775 #if TG3_TSO_SUPPORT != 0
8776         .get_tso                = ethtool_op_get_tso,
8777         .set_tso                = tg3_set_tso,
8778 #endif
8779         .self_test_count        = tg3_get_test_count,
8780         .self_test              = tg3_self_test,
8781         .get_strings            = tg3_get_strings,
8782         .phys_id                = tg3_phys_id,
8783         .get_stats_count        = tg3_get_stats_count,
8784         .get_ethtool_stats      = tg3_get_ethtool_stats,
8785         .get_coalesce           = tg3_get_coalesce,
8786         .set_coalesce           = tg3_set_coalesce,
8787         .get_perm_addr          = ethtool_op_get_perm_addr,
8788 };
8789
8790 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8791 {
8792         u32 cursize, val, magic;
8793
8794         tp->nvram_size = EEPROM_CHIP_SIZE;
8795
8796         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8797                 return;
8798
8799         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8800                 return;
8801
8802         /*
8803          * Size the chip by reading offsets at increasing powers of two.
8804          * When we encounter our validation signature, we know the addressing
8805          * has wrapped around, and thus have our chip size.
8806          */
8807         cursize = 0x10;
8808
8809         while (cursize < tp->nvram_size) {
8810                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8811                         return;
8812
8813                 if (val == magic)
8814                         break;
8815
8816                 cursize <<= 1;
8817         }
8818
8819         tp->nvram_size = cursize;
8820 }
8821                 
8822 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8823 {
8824         u32 val;
8825
8826         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8827                 return;
8828
8829         /* Selfboot format */
8830         if (val != TG3_EEPROM_MAGIC) {
8831                 tg3_get_eeprom_size(tp);
8832                 return;
8833         }
8834
8835         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8836                 if (val != 0) {
8837                         tp->nvram_size = (val >> 16) * 1024;
8838                         return;
8839                 }
8840         }
8841         tp->nvram_size = 0x20000;
8842 }
8843
8844 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8845 {
8846         u32 nvcfg1;
8847
8848         nvcfg1 = tr32(NVRAM_CFG1);
8849         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8850                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8851         }
8852         else {
8853                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8854                 tw32(NVRAM_CFG1, nvcfg1);
8855         }
8856
8857         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8858             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8859                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8860                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8861                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8862                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8863                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8864                                 break;
8865                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8866                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8867                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8868                                 break;
8869                         case FLASH_VENDOR_ATMEL_EEPROM:
8870                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8871                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8872                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8873                                 break;
8874                         case FLASH_VENDOR_ST:
8875                                 tp->nvram_jedecnum = JEDEC_ST;
8876                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8877                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8878                                 break;
8879                         case FLASH_VENDOR_SAIFUN:
8880                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8881                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8882                                 break;
8883                         case FLASH_VENDOR_SST_SMALL:
8884                         case FLASH_VENDOR_SST_LARGE:
8885                                 tp->nvram_jedecnum = JEDEC_SST;
8886                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8887                                 break;
8888                 }
8889         }
8890         else {
8891                 tp->nvram_jedecnum = JEDEC_ATMEL;
8892                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8893                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8894         }
8895 }
8896
8897 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8898 {
8899         u32 nvcfg1;
8900
8901         nvcfg1 = tr32(NVRAM_CFG1);
8902
8903         /* NVRAM protection for TPM */
8904         if (nvcfg1 & (1 << 27))
8905                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8906
8907         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8908                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8909                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8910                         tp->nvram_jedecnum = JEDEC_ATMEL;
8911                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8912                         break;
8913                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8914                         tp->nvram_jedecnum = JEDEC_ATMEL;
8915                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8916                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8917                         break;
8918                 case FLASH_5752VENDOR_ST_M45PE10:
8919                 case FLASH_5752VENDOR_ST_M45PE20:
8920                 case FLASH_5752VENDOR_ST_M45PE40:
8921                         tp->nvram_jedecnum = JEDEC_ST;
8922                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8923                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8924                         break;
8925         }
8926
8927         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8928                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8929                         case FLASH_5752PAGE_SIZE_256:
8930                                 tp->nvram_pagesize = 256;
8931                                 break;
8932                         case FLASH_5752PAGE_SIZE_512:
8933                                 tp->nvram_pagesize = 512;
8934                                 break;
8935                         case FLASH_5752PAGE_SIZE_1K:
8936                                 tp->nvram_pagesize = 1024;
8937                                 break;
8938                         case FLASH_5752PAGE_SIZE_2K:
8939                                 tp->nvram_pagesize = 2048;
8940                                 break;
8941                         case FLASH_5752PAGE_SIZE_4K:
8942                                 tp->nvram_pagesize = 4096;
8943                                 break;
8944                         case FLASH_5752PAGE_SIZE_264:
8945                                 tp->nvram_pagesize = 264;
8946                                 break;
8947                 }
8948         }
8949         else {
8950                 /* For eeprom, set pagesize to maximum eeprom size */
8951                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8952
8953                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8954                 tw32(NVRAM_CFG1, nvcfg1);
8955         }
8956 }
8957
8958 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
8959 {
8960         u32 nvcfg1;
8961
8962         nvcfg1 = tr32(NVRAM_CFG1);
8963
8964         /* NVRAM protection for TPM */
8965         if (nvcfg1 & (1 << 27))
8966                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8967
8968         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8969                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
8970                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
8971                         tp->nvram_jedecnum = JEDEC_ATMEL;
8972                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8973                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8974
8975                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8976                         tw32(NVRAM_CFG1, nvcfg1);
8977                         break;
8978                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8979                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8980                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8981                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8982                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
8983                         tp->nvram_jedecnum = JEDEC_ATMEL;
8984                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8985                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8986                         tp->nvram_pagesize = 264;
8987                         break;
8988                 case FLASH_5752VENDOR_ST_M45PE10:
8989                 case FLASH_5752VENDOR_ST_M45PE20:
8990                 case FLASH_5752VENDOR_ST_M45PE40:
8991                         tp->nvram_jedecnum = JEDEC_ST;
8992                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8993                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8994                         tp->nvram_pagesize = 256;
8995                         break;
8996         }
8997 }
8998
8999 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9000 {
9001         u32 nvcfg1;
9002
9003         nvcfg1 = tr32(NVRAM_CFG1);
9004
9005         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9006                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9007                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9008                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9009                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9010                         tp->nvram_jedecnum = JEDEC_ATMEL;
9011                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9012                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9013
9014                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9015                         tw32(NVRAM_CFG1, nvcfg1);
9016                         break;
9017                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9018                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9019                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9020                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9021                         tp->nvram_jedecnum = JEDEC_ATMEL;
9022                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9023                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9024                         tp->nvram_pagesize = 264;
9025                         break;
9026                 case FLASH_5752VENDOR_ST_M45PE10:
9027                 case FLASH_5752VENDOR_ST_M45PE20:
9028                 case FLASH_5752VENDOR_ST_M45PE40:
9029                         tp->nvram_jedecnum = JEDEC_ST;
9030                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9031                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9032                         tp->nvram_pagesize = 256;
9033                         break;
9034         }
9035 }
9036
9037 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9038 static void __devinit tg3_nvram_init(struct tg3 *tp)
9039 {
9040         int j;
9041
9042         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9043                 return;
9044
9045         tw32_f(GRC_EEPROM_ADDR,
9046              (EEPROM_ADDR_FSM_RESET |
9047               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9048                EEPROM_ADDR_CLKPERD_SHIFT)));
9049
9050         /* XXX schedule_timeout() ... */
9051         for (j = 0; j < 100; j++)
9052                 udelay(10);
9053
9054         /* Enable seeprom accesses. */
9055         tw32_f(GRC_LOCAL_CTRL,
9056              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9057         udelay(100);
9058
9059         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9060             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9061                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9062
9063                 if (tg3_nvram_lock(tp)) {
9064                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9065                                "tg3_nvram_init failed.\n", tp->dev->name);
9066                         return;
9067                 }
9068                 tg3_enable_nvram_access(tp);
9069
9070                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9071                         tg3_get_5752_nvram_info(tp);
9072                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9073                         tg3_get_5755_nvram_info(tp);
9074                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9075                         tg3_get_5787_nvram_info(tp);
9076                 else
9077                         tg3_get_nvram_info(tp);
9078
9079                 tg3_get_nvram_size(tp);
9080
9081                 tg3_disable_nvram_access(tp);
9082                 tg3_nvram_unlock(tp);
9083
9084         } else {
9085                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9086
9087                 tg3_get_eeprom_size(tp);
9088         }
9089 }
9090
9091 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9092                                         u32 offset, u32 *val)
9093 {
9094         u32 tmp;
9095         int i;
9096
9097         if (offset > EEPROM_ADDR_ADDR_MASK ||
9098             (offset % 4) != 0)
9099                 return -EINVAL;
9100
9101         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9102                                         EEPROM_ADDR_DEVID_MASK |
9103                                         EEPROM_ADDR_READ);
9104         tw32(GRC_EEPROM_ADDR,
9105              tmp |
9106              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9107              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9108               EEPROM_ADDR_ADDR_MASK) |
9109              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9110
9111         for (i = 0; i < 10000; i++) {
9112                 tmp = tr32(GRC_EEPROM_ADDR);
9113
9114                 if (tmp & EEPROM_ADDR_COMPLETE)
9115                         break;
9116                 udelay(100);
9117         }
9118         if (!(tmp & EEPROM_ADDR_COMPLETE))
9119                 return -EBUSY;
9120
9121         *val = tr32(GRC_EEPROM_DATA);
9122         return 0;
9123 }
9124
9125 #define NVRAM_CMD_TIMEOUT 10000
9126
9127 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9128 {
9129         int i;
9130
9131         tw32(NVRAM_CMD, nvram_cmd);
9132         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9133                 udelay(10);
9134                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9135                         udelay(10);
9136                         break;
9137                 }
9138         }
9139         if (i == NVRAM_CMD_TIMEOUT) {
9140                 return -EBUSY;
9141         }
9142         return 0;
9143 }
9144
9145 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9146 {
9147         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9148             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9149             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9150             (tp->nvram_jedecnum == JEDEC_ATMEL))
9151
9152                 addr = ((addr / tp->nvram_pagesize) <<
9153                         ATMEL_AT45DB0X1B_PAGE_POS) +
9154                        (addr % tp->nvram_pagesize);
9155
9156         return addr;
9157 }
9158
9159 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9160 {
9161         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9162             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9163             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9164             (tp->nvram_jedecnum == JEDEC_ATMEL))
9165
9166                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9167                         tp->nvram_pagesize) +
9168                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9169
9170         return addr;
9171 }
9172
9173 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9174 {
9175         int ret;
9176
9177         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9178                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9179                 return -EINVAL;
9180         }
9181
9182         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9183                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9184
9185         offset = tg3_nvram_phys_addr(tp, offset);
9186
9187         if (offset > NVRAM_ADDR_MSK)
9188                 return -EINVAL;
9189
9190         ret = tg3_nvram_lock(tp);
9191         if (ret)
9192                 return ret;
9193
9194         tg3_enable_nvram_access(tp);
9195
9196         tw32(NVRAM_ADDR, offset);
9197         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9198                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9199
9200         if (ret == 0)
9201                 *val = swab32(tr32(NVRAM_RDDATA));
9202
9203         tg3_disable_nvram_access(tp);
9204
9205         tg3_nvram_unlock(tp);
9206
9207         return ret;
9208 }
9209
9210 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9211 {
9212         int err;
9213         u32 tmp;
9214
9215         err = tg3_nvram_read(tp, offset, &tmp);
9216         *val = swab32(tmp);
9217         return err;
9218 }
9219
9220 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9221                                     u32 offset, u32 len, u8 *buf)
9222 {
9223         int i, j, rc = 0;
9224         u32 val;
9225
9226         for (i = 0; i < len; i += 4) {
9227                 u32 addr, data;
9228
9229                 addr = offset + i;
9230
9231                 memcpy(&data, buf + i, 4);
9232
9233                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9234
9235                 val = tr32(GRC_EEPROM_ADDR);
9236                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9237
9238                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9239                         EEPROM_ADDR_READ);
9240                 tw32(GRC_EEPROM_ADDR, val |
9241                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9242                         (addr & EEPROM_ADDR_ADDR_MASK) |
9243                         EEPROM_ADDR_START |
9244                         EEPROM_ADDR_WRITE);
9245                 
9246                 for (j = 0; j < 10000; j++) {
9247                         val = tr32(GRC_EEPROM_ADDR);
9248
9249                         if (val & EEPROM_ADDR_COMPLETE)
9250                                 break;
9251                         udelay(100);
9252                 }
9253                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9254                         rc = -EBUSY;
9255                         break;
9256                 }
9257         }
9258
9259         return rc;
9260 }
9261
9262 /* offset and length are dword aligned */
9263 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9264                 u8 *buf)
9265 {
9266         int ret = 0;
9267         u32 pagesize = tp->nvram_pagesize;
9268         u32 pagemask = pagesize - 1;
9269         u32 nvram_cmd;
9270         u8 *tmp;
9271
9272         tmp = kmalloc(pagesize, GFP_KERNEL);
9273         if (tmp == NULL)
9274                 return -ENOMEM;
9275
9276         while (len) {
9277                 int j;
9278                 u32 phy_addr, page_off, size;
9279
9280                 phy_addr = offset & ~pagemask;
9281         
9282                 for (j = 0; j < pagesize; j += 4) {
9283                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9284                                                 (u32 *) (tmp + j))))
9285                                 break;
9286                 }
9287                 if (ret)
9288                         break;
9289
9290                 page_off = offset & pagemask;
9291                 size = pagesize;
9292                 if (len < size)
9293                         size = len;
9294
9295                 len -= size;
9296
9297                 memcpy(tmp + page_off, buf, size);
9298
9299                 offset = offset + (pagesize - page_off);
9300
9301                 tg3_enable_nvram_access(tp);
9302
9303                 /*
9304                  * Before we can erase the flash page, we need
9305                  * to issue a special "write enable" command.
9306                  */
9307                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9308
9309                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9310                         break;
9311
9312                 /* Erase the target page */
9313                 tw32(NVRAM_ADDR, phy_addr);
9314
9315                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9316                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9317
9318                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9319                         break;
9320
9321                 /* Issue another write enable to start the write. */
9322                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9323
9324                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9325                         break;
9326
9327                 for (j = 0; j < pagesize; j += 4) {
9328                         u32 data;
9329
9330                         data = *((u32 *) (tmp + j));
9331                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9332
9333                         tw32(NVRAM_ADDR, phy_addr + j);
9334
9335                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9336                                 NVRAM_CMD_WR;
9337
9338                         if (j == 0)
9339                                 nvram_cmd |= NVRAM_CMD_FIRST;
9340                         else if (j == (pagesize - 4))
9341                                 nvram_cmd |= NVRAM_CMD_LAST;
9342
9343                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9344                                 break;
9345                 }
9346                 if (ret)
9347                         break;
9348         }
9349
9350         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9351         tg3_nvram_exec_cmd(tp, nvram_cmd);
9352
9353         kfree(tmp);
9354
9355         return ret;
9356 }
9357
9358 /* offset and length are dword aligned */
9359 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9360                 u8 *buf)
9361 {
9362         int i, ret = 0;
9363
9364         for (i = 0; i < len; i += 4, offset += 4) {
9365                 u32 data, page_off, phy_addr, nvram_cmd;
9366
9367                 memcpy(&data, buf + i, 4);
9368                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9369
9370                 page_off = offset % tp->nvram_pagesize;
9371
9372                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9373
9374                 tw32(NVRAM_ADDR, phy_addr);
9375
9376                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9377
9378                 if ((page_off == 0) || (i == 0))
9379                         nvram_cmd |= NVRAM_CMD_FIRST;
9380                 else if (page_off == (tp->nvram_pagesize - 4))
9381                         nvram_cmd |= NVRAM_CMD_LAST;
9382
9383                 if (i == (len - 4))
9384                         nvram_cmd |= NVRAM_CMD_LAST;
9385
9386                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9387                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9388                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9389                     (tp->nvram_jedecnum == JEDEC_ST) &&
9390                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9391
9392                         if ((ret = tg3_nvram_exec_cmd(tp,
9393                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9394                                 NVRAM_CMD_DONE)))
9395
9396                                 break;
9397                 }
9398                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9399                         /* We always do complete word writes to eeprom. */
9400                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9401                 }
9402
9403                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9404                         break;
9405         }
9406         return ret;
9407 }
9408
9409 /* offset and length are dword aligned */
9410 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9411 {
9412         int ret;
9413
9414         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9415                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9416                 return -EINVAL;
9417         }
9418
9419         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9420                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9421                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9422                 udelay(40);
9423         }
9424
9425         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9426                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9427         }
9428         else {
9429                 u32 grc_mode;
9430
9431                 ret = tg3_nvram_lock(tp);
9432                 if (ret)
9433                         return ret;
9434
9435                 tg3_enable_nvram_access(tp);
9436                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9437                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9438                         tw32(NVRAM_WRITE1, 0x406);
9439
9440                 grc_mode = tr32(GRC_MODE);
9441                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9442
9443                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9444                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9445
9446                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9447                                 buf);
9448                 }
9449                 else {
9450                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9451                                 buf);
9452                 }
9453
9454                 grc_mode = tr32(GRC_MODE);
9455                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9456
9457                 tg3_disable_nvram_access(tp);
9458                 tg3_nvram_unlock(tp);
9459         }
9460
9461         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9462                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9463                 udelay(40);
9464         }
9465
9466         return ret;
9467 }
9468
9469 struct subsys_tbl_ent {
9470         u16 subsys_vendor, subsys_devid;
9471         u32 phy_id;
9472 };
9473
9474 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9475         /* Broadcom boards. */
9476         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9477         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9478         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9479         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9480         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9481         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9482         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9483         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9484         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9485         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9486         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9487
9488         /* 3com boards. */
9489         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9490         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9491         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9492         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9493         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9494
9495         /* DELL boards. */
9496         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9497         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9498         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9499         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9500
9501         /* Compaq boards. */
9502         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9503         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9504         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9505         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9506         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9507
9508         /* IBM boards. */
9509         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9510 };
9511
9512 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9513 {
9514         int i;
9515
9516         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9517                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9518                      tp->pdev->subsystem_vendor) &&
9519                     (subsys_id_to_phy_id[i].subsys_devid ==
9520                      tp->pdev->subsystem_device))
9521                         return &subsys_id_to_phy_id[i];
9522         }
9523         return NULL;
9524 }
9525
9526 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9527 {
9528         u32 val;
9529         u16 pmcsr;
9530
9531         /* On some early chips the SRAM cannot be accessed in D3hot state,
9532          * so need make sure we're in D0.
9533          */
9534         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9535         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9536         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9537         msleep(1);
9538
9539         /* Make sure register accesses (indirect or otherwise)
9540          * will function correctly.
9541          */
9542         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9543                                tp->misc_host_ctrl);
9544
9545         tp->phy_id = PHY_ID_INVALID;
9546         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9547
9548         /* Do not even try poking around in here on Sun parts.  */
9549         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9550                 /* All SUN chips are built-in LOMs. */
9551                 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9552                 return;
9553         }
9554
9555         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9556         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9557                 u32 nic_cfg, led_cfg;
9558                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9559                 int eeprom_phy_serdes = 0;
9560
9561                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9562                 tp->nic_sram_data_cfg = nic_cfg;
9563
9564                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9565                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9566                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9567                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9568                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9569                     (ver > 0) && (ver < 0x100))
9570                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9571
9572                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9573                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9574                         eeprom_phy_serdes = 1;
9575
9576                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9577                 if (nic_phy_id != 0) {
9578                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9579                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9580
9581                         eeprom_phy_id  = (id1 >> 16) << 10;
9582                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9583                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9584                 } else
9585                         eeprom_phy_id = 0;
9586
9587                 tp->phy_id = eeprom_phy_id;
9588                 if (eeprom_phy_serdes) {
9589                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9590                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9591                         else
9592                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9593                 }
9594
9595                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9596                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9597                                     SHASTA_EXT_LED_MODE_MASK);
9598                 else
9599                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9600
9601                 switch (led_cfg) {
9602                 default:
9603                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9604                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9605                         break;
9606
9607                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9608                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9609                         break;
9610
9611                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9612                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9613
9614                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9615                          * read on some older 5700/5701 bootcode.
9616                          */
9617                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9618                             ASIC_REV_5700 ||
9619                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9620                             ASIC_REV_5701)
9621                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9622
9623                         break;
9624
9625                 case SHASTA_EXT_LED_SHARED:
9626                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9627                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9628                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9629                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9630                                                  LED_CTRL_MODE_PHY_2);
9631                         break;
9632
9633                 case SHASTA_EXT_LED_MAC:
9634                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9635                         break;
9636
9637                 case SHASTA_EXT_LED_COMBO:
9638                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9639                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9640                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9641                                                  LED_CTRL_MODE_PHY_2);
9642                         break;
9643
9644                 };
9645
9646                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9647                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9648                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9649                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9650
9651                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9652                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9653
9654                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9655                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9656                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9657                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9658                 }
9659                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9660                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9661
9662                 if (cfg2 & (1 << 17))
9663                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9664
9665                 /* serdes signal pre-emphasis in register 0x590 set by */
9666                 /* bootcode if bit 18 is set */
9667                 if (cfg2 & (1 << 18))
9668                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9669         }
9670 }
9671
9672 static int __devinit tg3_phy_probe(struct tg3 *tp)
9673 {
9674         u32 hw_phy_id_1, hw_phy_id_2;
9675         u32 hw_phy_id, hw_phy_id_masked;
9676         int err;
9677
9678         /* Reading the PHY ID register can conflict with ASF
9679          * firwmare access to the PHY hardware.
9680          */
9681         err = 0;
9682         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9683                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9684         } else {
9685                 /* Now read the physical PHY_ID from the chip and verify
9686                  * that it is sane.  If it doesn't look good, we fall back
9687                  * to either the hard-coded table based PHY_ID and failing
9688                  * that the value found in the eeprom area.
9689                  */
9690                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9691                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9692
9693                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9694                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9695                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9696
9697                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9698         }
9699
9700         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9701                 tp->phy_id = hw_phy_id;
9702                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9703                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9704                 else
9705                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9706         } else {
9707                 if (tp->phy_id != PHY_ID_INVALID) {
9708                         /* Do nothing, phy ID already set up in
9709                          * tg3_get_eeprom_hw_cfg().
9710                          */
9711                 } else {
9712                         struct subsys_tbl_ent *p;
9713
9714                         /* No eeprom signature?  Try the hardcoded
9715                          * subsys device table.
9716                          */
9717                         p = lookup_by_subsys(tp);
9718                         if (!p)
9719                                 return -ENODEV;
9720
9721                         tp->phy_id = p->phy_id;
9722                         if (!tp->phy_id ||
9723                             tp->phy_id == PHY_ID_BCM8002)
9724                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9725                 }
9726         }
9727
9728         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9729             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9730                 u32 bmsr, adv_reg, tg3_ctrl;
9731
9732                 tg3_readphy(tp, MII_BMSR, &bmsr);
9733                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9734                     (bmsr & BMSR_LSTATUS))
9735                         goto skip_phy_reset;
9736                     
9737                 err = tg3_phy_reset(tp);
9738                 if (err)
9739                         return err;
9740
9741                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9742                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9743                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9744                 tg3_ctrl = 0;
9745                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9746                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9747                                     MII_TG3_CTRL_ADV_1000_FULL);
9748                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9749                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9750                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9751                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9752                 }
9753
9754                 if (!tg3_copper_is_advertising_all(tp)) {
9755                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9756
9757                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9758                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9759
9760                         tg3_writephy(tp, MII_BMCR,
9761                                      BMCR_ANENABLE | BMCR_ANRESTART);
9762                 }
9763                 tg3_phy_set_wirespeed(tp);
9764
9765                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9766                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9767                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9768         }
9769
9770 skip_phy_reset:
9771         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9772                 err = tg3_init_5401phy_dsp(tp);
9773                 if (err)
9774                         return err;
9775         }
9776
9777         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9778                 err = tg3_init_5401phy_dsp(tp);
9779         }
9780
9781         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9782                 tp->link_config.advertising =
9783                         (ADVERTISED_1000baseT_Half |
9784                          ADVERTISED_1000baseT_Full |
9785                          ADVERTISED_Autoneg |
9786                          ADVERTISED_FIBRE);
9787         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9788                 tp->link_config.advertising &=
9789                         ~(ADVERTISED_1000baseT_Half |
9790                           ADVERTISED_1000baseT_Full);
9791
9792         return err;
9793 }
9794
9795 static void __devinit tg3_read_partno(struct tg3 *tp)
9796 {
9797         unsigned char vpd_data[256];
9798         int i;
9799         u32 magic;
9800
9801         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9802                 /* Sun decided not to put the necessary bits in the
9803                  * NVRAM of their onboard tg3 parts :(
9804                  */
9805                 strcpy(tp->board_part_number, "Sun 570X");
9806                 return;
9807         }
9808
9809         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9810                 return;
9811
9812         if (magic == TG3_EEPROM_MAGIC) {
9813                 for (i = 0; i < 256; i += 4) {
9814                         u32 tmp;
9815
9816                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9817                                 goto out_not_found;
9818
9819                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9820                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9821                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9822                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9823                 }
9824         } else {
9825                 int vpd_cap;
9826
9827                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9828                 for (i = 0; i < 256; i += 4) {
9829                         u32 tmp, j = 0;
9830                         u16 tmp16;
9831
9832                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9833                                               i);
9834                         while (j++ < 100) {
9835                                 pci_read_config_word(tp->pdev, vpd_cap +
9836                                                      PCI_VPD_ADDR, &tmp16);
9837                                 if (tmp16 & 0x8000)
9838                                         break;
9839                                 msleep(1);
9840                         }
9841                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9842                                               &tmp);
9843                         tmp = cpu_to_le32(tmp);
9844                         memcpy(&vpd_data[i], &tmp, 4);
9845                 }
9846         }
9847
9848         /* Now parse and find the part number. */
9849         for (i = 0; i < 256; ) {
9850                 unsigned char val = vpd_data[i];
9851                 int block_end;
9852
9853                 if (val == 0x82 || val == 0x91) {
9854                         i = (i + 3 +
9855                              (vpd_data[i + 1] +
9856                               (vpd_data[i + 2] << 8)));
9857                         continue;
9858                 }
9859
9860                 if (val != 0x90)
9861                         goto out_not_found;
9862
9863                 block_end = (i + 3 +
9864                              (vpd_data[i + 1] +
9865                               (vpd_data[i + 2] << 8)));
9866                 i += 3;
9867                 while (i < block_end) {
9868                         if (vpd_data[i + 0] == 'P' &&
9869                             vpd_data[i + 1] == 'N') {
9870                                 int partno_len = vpd_data[i + 2];
9871
9872                                 if (partno_len > 24)
9873                                         goto out_not_found;
9874
9875                                 memcpy(tp->board_part_number,
9876                                        &vpd_data[i + 3],
9877                                        partno_len);
9878
9879                                 /* Success. */
9880                                 return;
9881                         }
9882                 }
9883
9884                 /* Part number not found. */
9885                 goto out_not_found;
9886         }
9887
9888 out_not_found:
9889         strcpy(tp->board_part_number, "none");
9890 }
9891
9892 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9893 {
9894         u32 val, offset, start;
9895
9896         if (tg3_nvram_read_swab(tp, 0, &val))
9897                 return;
9898
9899         if (val != TG3_EEPROM_MAGIC)
9900                 return;
9901
9902         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9903             tg3_nvram_read_swab(tp, 0x4, &start))
9904                 return;
9905
9906         offset = tg3_nvram_logical_addr(tp, offset);
9907         if (tg3_nvram_read_swab(tp, offset, &val))
9908                 return;
9909
9910         if ((val & 0xfc000000) == 0x0c000000) {
9911                 u32 ver_offset, addr;
9912                 int i;
9913
9914                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9915                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9916                         return;
9917
9918                 if (val != 0)
9919                         return;
9920
9921                 addr = offset + ver_offset - start;
9922                 for (i = 0; i < 16; i += 4) {
9923                         if (tg3_nvram_read(tp, addr + i, &val))
9924                                 return;
9925
9926                         val = cpu_to_le32(val);
9927                         memcpy(tp->fw_ver + i, &val, 4);
9928                 }
9929         }
9930 }
9931
9932 #ifdef CONFIG_SPARC64
9933 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9934 {
9935         struct pci_dev *pdev = tp->pdev;
9936         struct pcidev_cookie *pcp = pdev->sysdata;
9937
9938         if (pcp != NULL) {
9939                 int node = pcp->prom_node;
9940                 u32 venid;
9941                 int err;
9942
9943                 err = prom_getproperty(node, "subsystem-vendor-id",
9944                                        (char *) &venid, sizeof(venid));
9945                 if (err == 0 || err == -1)
9946                         return 0;
9947                 if (venid == PCI_VENDOR_ID_SUN)
9948                         return 1;
9949
9950                 /* TG3 chips onboard the SunBlade-2500 don't have the
9951                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9952                  * are distinguishable from non-Sun variants by being
9953                  * named "network" by the firmware.  Non-Sun cards will
9954                  * show up as being named "ethernet".
9955                  */
9956                 if (!strcmp(pcp->prom_name, "network"))
9957                         return 1;
9958         }
9959         return 0;
9960 }
9961 #endif
9962
9963 static int __devinit tg3_get_invariants(struct tg3 *tp)
9964 {
9965         static struct pci_device_id write_reorder_chipsets[] = {
9966                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9967                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9968                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9969                              PCI_DEVICE_ID_VIA_8385_0) },
9970                 { },
9971         };
9972         u32 misc_ctrl_reg;
9973         u32 cacheline_sz_reg;
9974         u32 pci_state_reg, grc_misc_cfg;
9975         u32 val;
9976         u16 pci_cmd;
9977         int err;
9978
9979 #ifdef CONFIG_SPARC64
9980         if (tg3_is_sun_570X(tp))
9981                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9982 #endif
9983
9984         /* Force memory write invalidate off.  If we leave it on,
9985          * then on 5700_BX chips we have to enable a workaround.
9986          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9987          * to match the cacheline size.  The Broadcom driver have this
9988          * workaround but turns MWI off all the times so never uses
9989          * it.  This seems to suggest that the workaround is insufficient.
9990          */
9991         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9992         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9993         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9994
9995         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9996          * has the register indirect write enable bit set before
9997          * we try to access any of the MMIO registers.  It is also
9998          * critical that the PCI-X hw workaround situation is decided
9999          * before that as well.
10000          */
10001         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10002                               &misc_ctrl_reg);
10003
10004         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10005                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10006
10007         /* Wrong chip ID in 5752 A0. This code can be removed later
10008          * as A0 is not in production.
10009          */
10010         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10011                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10012
10013         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10014          * we need to disable memory and use config. cycles
10015          * only to access all registers. The 5702/03 chips
10016          * can mistakenly decode the special cycles from the
10017          * ICH chipsets as memory write cycles, causing corruption
10018          * of register and memory space. Only certain ICH bridges
10019          * will drive special cycles with non-zero data during the
10020          * address phase which can fall within the 5703's address
10021          * range. This is not an ICH bug as the PCI spec allows
10022          * non-zero address during special cycles. However, only
10023          * these ICH bridges are known to drive non-zero addresses
10024          * during special cycles.
10025          *
10026          * Since special cycles do not cross PCI bridges, we only
10027          * enable this workaround if the 5703 is on the secondary
10028          * bus of these ICH bridges.
10029          */
10030         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10031             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10032                 static struct tg3_dev_id {
10033                         u32     vendor;
10034                         u32     device;
10035                         u32     rev;
10036                 } ich_chipsets[] = {
10037                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10038                           PCI_ANY_ID },
10039                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10040                           PCI_ANY_ID },
10041                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10042                           0xa },
10043                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10044                           PCI_ANY_ID },
10045                         { },
10046                 };
10047                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10048                 struct pci_dev *bridge = NULL;
10049
10050                 while (pci_id->vendor != 0) {
10051                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10052                                                 bridge);
10053                         if (!bridge) {
10054                                 pci_id++;
10055                                 continue;
10056                         }
10057                         if (pci_id->rev != PCI_ANY_ID) {
10058                                 u8 rev;
10059
10060                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10061                                                      &rev);
10062                                 if (rev > pci_id->rev)
10063                                         continue;
10064                         }
10065                         if (bridge->subordinate &&
10066                             (bridge->subordinate->number ==
10067                              tp->pdev->bus->number)) {
10068
10069                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10070                                 pci_dev_put(bridge);
10071                                 break;
10072                         }
10073                 }
10074         }
10075
10076         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10077          * DMA addresses > 40-bit. This bridge may have other additional
10078          * 57xx devices behind it in some 4-port NIC designs for example.
10079          * Any tg3 device found behind the bridge will also need the 40-bit
10080          * DMA workaround.
10081          */
10082         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10083             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10084                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10085                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10086                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10087         }
10088         else {
10089                 struct pci_dev *bridge = NULL;
10090
10091                 do {
10092                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10093                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10094                                                 bridge);
10095                         if (bridge && bridge->subordinate &&
10096                             (bridge->subordinate->number <=
10097                              tp->pdev->bus->number) &&
10098                             (bridge->subordinate->subordinate >=
10099                              tp->pdev->bus->number)) {
10100                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10101                                 pci_dev_put(bridge);
10102                                 break;
10103                         }
10104                 } while (bridge);
10105         }
10106
10107         /* Initialize misc host control in PCI block. */
10108         tp->misc_host_ctrl |= (misc_ctrl_reg &
10109                                MISC_HOST_CTRL_CHIPREV);
10110         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10111                                tp->misc_host_ctrl);
10112
10113         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10114                               &cacheline_sz_reg);
10115
10116         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10117         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10118         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10119         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10120
10121         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10122             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10123             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10124             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10125             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10126                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10127
10128         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10129             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10130                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10131
10132         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10133                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10134                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10135                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10136                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10137                 } else
10138                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10139         }
10140
10141         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10142             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10143             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10144             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10145             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10146                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10147
10148         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10149                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10150
10151         /* If we have an AMD 762 or VIA K8T800 chipset, write
10152          * reordering to the mailbox registers done by the host
10153          * controller can cause major troubles.  We read back from
10154          * every mailbox register write to force the writes to be
10155          * posted to the chip in order.
10156          */
10157         if (pci_dev_present(write_reorder_chipsets) &&
10158             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10159                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10160
10161         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10162             tp->pci_lat_timer < 64) {
10163                 tp->pci_lat_timer = 64;
10164
10165                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10166                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10167                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10168                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10169
10170                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10171                                        cacheline_sz_reg);
10172         }
10173
10174         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10175                               &pci_state_reg);
10176
10177         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10178                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10179
10180                 /* If this is a 5700 BX chipset, and we are in PCI-X
10181                  * mode, enable register write workaround.
10182                  *
10183                  * The workaround is to use indirect register accesses
10184                  * for all chip writes not to mailbox registers.
10185                  */
10186                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10187                         u32 pm_reg;
10188                         u16 pci_cmd;
10189
10190                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10191
10192                         /* The chip can have it's power management PCI config
10193                          * space registers clobbered due to this bug.
10194                          * So explicitly force the chip into D0 here.
10195                          */
10196                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10197                                               &pm_reg);
10198                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10199                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10200                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10201                                                pm_reg);
10202
10203                         /* Also, force SERR#/PERR# in PCI command. */
10204                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10205                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10206                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10207                 }
10208         }
10209
10210         /* 5700 BX chips need to have their TX producer index mailboxes
10211          * written twice to workaround a bug.
10212          */
10213         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10214                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10215
10216         /* Back to back register writes can cause problems on this chip,
10217          * the workaround is to read back all reg writes except those to
10218          * mailbox regs.  See tg3_write_indirect_reg32().
10219          *
10220          * PCI Express 5750_A0 rev chips need this workaround too.
10221          */
10222         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10223             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10224              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10225                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10226
10227         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10228                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10229         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10230                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10231
10232         /* Chip-specific fixup from Broadcom driver */
10233         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10234             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10235                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10236                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10237         }
10238
10239         /* Default fast path register access methods */
10240         tp->read32 = tg3_read32;
10241         tp->write32 = tg3_write32;
10242         tp->read32_mbox = tg3_read32;
10243         tp->write32_mbox = tg3_write32;
10244         tp->write32_tx_mbox = tg3_write32;
10245         tp->write32_rx_mbox = tg3_write32;
10246
10247         /* Various workaround register access methods */
10248         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10249                 tp->write32 = tg3_write_indirect_reg32;
10250         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10251                 tp->write32 = tg3_write_flush_reg32;
10252
10253         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10254             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10255                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10256                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10257                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10258         }
10259
10260         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10261                 tp->read32 = tg3_read_indirect_reg32;
10262                 tp->write32 = tg3_write_indirect_reg32;
10263                 tp->read32_mbox = tg3_read_indirect_mbox;
10264                 tp->write32_mbox = tg3_write_indirect_mbox;
10265                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10266                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10267
10268                 iounmap(tp->regs);
10269                 tp->regs = NULL;
10270
10271                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10272                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10273                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10274         }
10275
10276         if (tp->write32 == tg3_write_indirect_reg32 ||
10277             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10278              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10279               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) ||
10280             (tp->tg3_flags2 & TG3_FLG2_SUN_570X))
10281                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10282
10283         /* Get eeprom hw config before calling tg3_set_power_state().
10284          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10285          * determined before calling tg3_set_power_state() so that
10286          * we know whether or not to switch out of Vaux power.
10287          * When the flag is set, it means that GPIO1 is used for eeprom
10288          * write protect and also implies that it is a LOM where GPIOs
10289          * are not used to switch power.
10290          */ 
10291         tg3_get_eeprom_hw_cfg(tp);
10292
10293         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10294          * GPIO1 driven high will bring 5700's external PHY out of reset.
10295          * It is also used as eeprom write protect on LOMs.
10296          */
10297         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10298         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10299             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10300                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10301                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10302         /* Unused GPIO3 must be driven as output on 5752 because there
10303          * are no pull-up resistors on unused GPIO pins.
10304          */
10305         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10306                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10307
10308         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10309                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10310
10311         /* Force the chip into D0. */
10312         err = tg3_set_power_state(tp, PCI_D0);
10313         if (err) {
10314                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10315                        pci_name(tp->pdev));
10316                 return err;
10317         }
10318
10319         /* 5700 B0 chips do not support checksumming correctly due
10320          * to hardware bugs.
10321          */
10322         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10323                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10324
10325         /* Derive initial jumbo mode from MTU assigned in
10326          * ether_setup() via the alloc_etherdev() call
10327          */
10328         if (tp->dev->mtu > ETH_DATA_LEN &&
10329             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10330                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10331
10332         /* Determine WakeOnLan speed to use. */
10333         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10334             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10335             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10336             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10337                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10338         } else {
10339                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10340         }
10341
10342         /* A few boards don't want Ethernet@WireSpeed phy feature */
10343         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10344             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10345              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10346              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10347             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10348                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10349
10350         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10351             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10352                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10353         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10354                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10355
10356         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10357             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10358             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10359                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10360
10361         tp->coalesce_mode = 0;
10362         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10363             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10364                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10365
10366         /* Initialize MAC MI mode, polling disabled. */
10367         tw32_f(MAC_MI_MODE, tp->mi_mode);
10368         udelay(80);
10369
10370         /* Initialize data/descriptor byte/word swapping. */
10371         val = tr32(GRC_MODE);
10372         val &= GRC_MODE_HOST_STACKUP;
10373         tw32(GRC_MODE, val | tp->grc_mode);
10374
10375         tg3_switch_clocks(tp);
10376
10377         /* Clear this out for sanity. */
10378         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10379
10380         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10381                               &pci_state_reg);
10382         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10383             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10384                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10385
10386                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10387                     chiprevid == CHIPREV_ID_5701_B0 ||
10388                     chiprevid == CHIPREV_ID_5701_B2 ||
10389                     chiprevid == CHIPREV_ID_5701_B5) {
10390                         void __iomem *sram_base;
10391
10392                         /* Write some dummy words into the SRAM status block
10393                          * area, see if it reads back correctly.  If the return
10394                          * value is bad, force enable the PCIX workaround.
10395                          */
10396                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10397
10398                         writel(0x00000000, sram_base);
10399                         writel(0x00000000, sram_base + 4);
10400                         writel(0xffffffff, sram_base + 4);
10401                         if (readl(sram_base) != 0x00000000)
10402                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10403                 }
10404         }
10405
10406         udelay(50);
10407         tg3_nvram_init(tp);
10408
10409         grc_misc_cfg = tr32(GRC_MISC_CFG);
10410         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10411
10412         /* Broadcom's driver says that CIOBE multisplit has a bug */
10413 #if 0
10414         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10415             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10416                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10417                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10418         }
10419 #endif
10420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10421             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10422              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10423                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10424
10425         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10426             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10427                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10428         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10429                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10430                                       HOSTCC_MODE_CLRTICK_TXBD);
10431
10432                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10433                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10434                                        tp->misc_host_ctrl);
10435         }
10436
10437         /* these are limited to 10/100 only */
10438         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10439              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10440             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10441              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10442              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10443               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10444               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10445             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10446              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10447               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10448                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10449
10450         err = tg3_phy_probe(tp);
10451         if (err) {
10452                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10453                        pci_name(tp->pdev), err);
10454                 /* ... but do not return immediately ... */
10455         }
10456
10457         tg3_read_partno(tp);
10458         tg3_read_fw_ver(tp);
10459
10460         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10461                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10462         } else {
10463                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10464                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10465                 else
10466                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10467         }
10468
10469         /* 5700 {AX,BX} chips have a broken status block link
10470          * change bit implementation, so we must use the
10471          * status register in those cases.
10472          */
10473         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10474                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10475         else
10476                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10477
10478         /* The led_ctrl is set during tg3_phy_probe, here we might
10479          * have to force the link status polling mechanism based
10480          * upon subsystem IDs.
10481          */
10482         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10483             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10484                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10485                                   TG3_FLAG_USE_LINKCHG_REG);
10486         }
10487
10488         /* For all SERDES we poll the MAC status register. */
10489         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10490                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10491         else
10492                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10493
10494         /* All chips before 5787 can get confused if TX buffers
10495          * straddle the 4GB address boundary in some cases.
10496          */
10497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10498             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10499                 tp->dev->hard_start_xmit = tg3_start_xmit;
10500         else
10501                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10502
10503         tp->rx_offset = 2;
10504         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10505             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10506                 tp->rx_offset = 0;
10507
10508         /* By default, disable wake-on-lan.  User can change this
10509          * using ETHTOOL_SWOL.
10510          */
10511         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10512
10513         return err;
10514 }
10515
10516 #ifdef CONFIG_SPARC64
10517 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10518 {
10519         struct net_device *dev = tp->dev;
10520         struct pci_dev *pdev = tp->pdev;
10521         struct pcidev_cookie *pcp = pdev->sysdata;
10522
10523         if (pcp != NULL) {
10524                 int node = pcp->prom_node;
10525
10526                 if (prom_getproplen(node, "local-mac-address") == 6) {
10527                         prom_getproperty(node, "local-mac-address",
10528                                          dev->dev_addr, 6);
10529                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10530                         return 0;
10531                 }
10532         }
10533         return -ENODEV;
10534 }
10535
10536 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10537 {
10538         struct net_device *dev = tp->dev;
10539
10540         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10541         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10542         return 0;
10543 }
10544 #endif
10545
10546 static int __devinit tg3_get_device_address(struct tg3 *tp)
10547 {
10548         struct net_device *dev = tp->dev;
10549         u32 hi, lo, mac_offset;
10550         int addr_ok = 0;
10551
10552 #ifdef CONFIG_SPARC64
10553         if (!tg3_get_macaddr_sparc(tp))
10554                 return 0;
10555 #endif
10556
10557         mac_offset = 0x7c;
10558         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10559              !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
10560             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10561                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10562                         mac_offset = 0xcc;
10563                 if (tg3_nvram_lock(tp))
10564                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10565                 else
10566                         tg3_nvram_unlock(tp);
10567         }
10568
10569         /* First try to get it from MAC address mailbox. */
10570         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10571         if ((hi >> 16) == 0x484b) {
10572                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10573                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10574
10575                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10576                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10577                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10578                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10579                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10580
10581                 /* Some old bootcode may report a 0 MAC address in SRAM */
10582                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10583         }
10584         if (!addr_ok) {
10585                 /* Next, try NVRAM. */
10586                 if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
10587                     !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10588                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10589                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10590                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10591                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10592                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10593                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10594                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10595                 }
10596                 /* Finally just fetch it out of the MAC control regs. */
10597                 else {
10598                         hi = tr32(MAC_ADDR_0_HIGH);
10599                         lo = tr32(MAC_ADDR_0_LOW);
10600
10601                         dev->dev_addr[5] = lo & 0xff;
10602                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10603                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10604                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10605                         dev->dev_addr[1] = hi & 0xff;
10606                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10607                 }
10608         }
10609
10610         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10611 #ifdef CONFIG_SPARC64
10612                 if (!tg3_get_default_macaddr_sparc(tp))
10613                         return 0;
10614 #endif
10615                 return -EINVAL;
10616         }
10617         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10618         return 0;
10619 }
10620
10621 #define BOUNDARY_SINGLE_CACHELINE       1
10622 #define BOUNDARY_MULTI_CACHELINE        2
10623
10624 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10625 {
10626         int cacheline_size;
10627         u8 byte;
10628         int goal;
10629
10630         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10631         if (byte == 0)
10632                 cacheline_size = 1024;
10633         else
10634                 cacheline_size = (int) byte * 4;
10635
10636         /* On 5703 and later chips, the boundary bits have no
10637          * effect.
10638          */
10639         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10640             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10641             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10642                 goto out;
10643
10644 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10645         goal = BOUNDARY_MULTI_CACHELINE;
10646 #else
10647 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10648         goal = BOUNDARY_SINGLE_CACHELINE;
10649 #else
10650         goal = 0;
10651 #endif
10652 #endif
10653
10654         if (!goal)
10655                 goto out;
10656
10657         /* PCI controllers on most RISC systems tend to disconnect
10658          * when a device tries to burst across a cache-line boundary.
10659          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10660          *
10661          * Unfortunately, for PCI-E there are only limited
10662          * write-side controls for this, and thus for reads
10663          * we will still get the disconnects.  We'll also waste
10664          * these PCI cycles for both read and write for chips
10665          * other than 5700 and 5701 which do not implement the
10666          * boundary bits.
10667          */
10668         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10669             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10670                 switch (cacheline_size) {
10671                 case 16:
10672                 case 32:
10673                 case 64:
10674                 case 128:
10675                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10676                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10677                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10678                         } else {
10679                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10680                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10681                         }
10682                         break;
10683
10684                 case 256:
10685                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10686                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10687                         break;
10688
10689                 default:
10690                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10691                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10692                         break;
10693                 };
10694         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10695                 switch (cacheline_size) {
10696                 case 16:
10697                 case 32:
10698                 case 64:
10699                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10700                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10701                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10702                                 break;
10703                         }
10704                         /* fallthrough */
10705                 case 128:
10706                 default:
10707                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10708                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10709                         break;
10710                 };
10711         } else {
10712                 switch (cacheline_size) {
10713                 case 16:
10714                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10715                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10716                                         DMA_RWCTRL_WRITE_BNDRY_16);
10717                                 break;
10718                         }
10719                         /* fallthrough */
10720                 case 32:
10721                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10722                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10723                                         DMA_RWCTRL_WRITE_BNDRY_32);
10724                                 break;
10725                         }
10726                         /* fallthrough */
10727                 case 64:
10728                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10729                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10730                                         DMA_RWCTRL_WRITE_BNDRY_64);
10731                                 break;
10732                         }
10733                         /* fallthrough */
10734                 case 128:
10735                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10736                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10737                                         DMA_RWCTRL_WRITE_BNDRY_128);
10738                                 break;
10739                         }
10740                         /* fallthrough */
10741                 case 256:
10742                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10743                                 DMA_RWCTRL_WRITE_BNDRY_256);
10744                         break;
10745                 case 512:
10746                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10747                                 DMA_RWCTRL_WRITE_BNDRY_512);
10748                         break;
10749                 case 1024:
10750                 default:
10751                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10752                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10753                         break;
10754                 };
10755         }
10756
10757 out:
10758         return val;
10759 }
10760
10761 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10762 {
10763         struct tg3_internal_buffer_desc test_desc;
10764         u32 sram_dma_descs;
10765         int i, ret;
10766
10767         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10768
10769         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10770         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10771         tw32(RDMAC_STATUS, 0);
10772         tw32(WDMAC_STATUS, 0);
10773
10774         tw32(BUFMGR_MODE, 0);
10775         tw32(FTQ_RESET, 0);
10776
10777         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10778         test_desc.addr_lo = buf_dma & 0xffffffff;
10779         test_desc.nic_mbuf = 0x00002100;
10780         test_desc.len = size;
10781
10782         /*
10783          * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
10784          * the *second* time the tg3 driver was getting loaded after an
10785          * initial scan.
10786          *
10787          * Broadcom tells me:
10788          *   ...the DMA engine is connected to the GRC block and a DMA
10789          *   reset may affect the GRC block in some unpredictable way...
10790          *   The behavior of resets to individual blocks has not been tested.
10791          *
10792          * Broadcom noted the GRC reset will also reset all sub-components.
10793          */
10794         if (to_device) {
10795                 test_desc.cqid_sqid = (13 << 8) | 2;
10796
10797                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10798                 udelay(40);
10799         } else {
10800                 test_desc.cqid_sqid = (16 << 8) | 7;
10801
10802                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10803                 udelay(40);
10804         }
10805         test_desc.flags = 0x00000005;
10806
10807         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10808                 u32 val;
10809
10810                 val = *(((u32 *)&test_desc) + i);
10811                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10812                                        sram_dma_descs + (i * sizeof(u32)));
10813                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10814         }
10815         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10816
10817         if (to_device) {
10818                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10819         } else {
10820                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10821         }
10822
10823         ret = -ENODEV;
10824         for (i = 0; i < 40; i++) {
10825                 u32 val;
10826
10827                 if (to_device)
10828                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10829                 else
10830                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10831                 if ((val & 0xffff) == sram_dma_descs) {
10832                         ret = 0;
10833                         break;
10834                 }
10835
10836                 udelay(100);
10837         }
10838
10839         return ret;
10840 }
10841
10842 #define TEST_BUFFER_SIZE        0x2000
10843
10844 static int __devinit tg3_test_dma(struct tg3 *tp)
10845 {
10846         dma_addr_t buf_dma;
10847         u32 *buf, saved_dma_rwctrl;
10848         int ret;
10849
10850         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10851         if (!buf) {
10852                 ret = -ENOMEM;
10853                 goto out_nofree;
10854         }
10855
10856         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10857                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10858
10859         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10860
10861         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10862                 /* DMA read watermark not used on PCIE */
10863                 tp->dma_rwctrl |= 0x00180000;
10864         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10865                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10866                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10867                         tp->dma_rwctrl |= 0x003f0000;
10868                 else
10869                         tp->dma_rwctrl |= 0x003f000f;
10870         } else {
10871                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10872                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10873                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10874
10875                         /* If the 5704 is behind the EPB bridge, we can
10876                          * do the less restrictive ONE_DMA workaround for
10877                          * better performance.
10878                          */
10879                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10880                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10881                                 tp->dma_rwctrl |= 0x8000;
10882                         else if (ccval == 0x6 || ccval == 0x7)
10883                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10884
10885                         /* Set bit 23 to enable PCIX hw bug fix */
10886                         tp->dma_rwctrl |= 0x009f0000;
10887                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10888                         /* 5780 always in PCIX mode */
10889                         tp->dma_rwctrl |= 0x00144000;
10890                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10891                         /* 5714 always in PCIX mode */
10892                         tp->dma_rwctrl |= 0x00148000;
10893                 } else {
10894                         tp->dma_rwctrl |= 0x001b000f;
10895                 }
10896         }
10897
10898         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10899             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10900                 tp->dma_rwctrl &= 0xfffffff0;
10901
10902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10903             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10904                 /* Remove this if it causes problems for some boards. */
10905                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10906
10907                 /* On 5700/5701 chips, we need to set this bit.
10908                  * Otherwise the chip will issue cacheline transactions
10909                  * to streamable DMA memory with not all the byte
10910                  * enables turned on.  This is an error on several
10911                  * RISC PCI controllers, in particular sparc64.
10912                  *
10913                  * On 5703/5704 chips, this bit has been reassigned
10914                  * a different meaning.  In particular, it is used
10915                  * on those chips to enable a PCI-X workaround.
10916                  */
10917                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10918         }
10919
10920         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10921
10922 #if 0
10923         /* Unneeded, already done by tg3_get_invariants.  */
10924         tg3_switch_clocks(tp);
10925 #endif
10926
10927         ret = 0;
10928         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10929             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10930                 goto out;
10931
10932         /* It is best to perform DMA test with maximum write burst size
10933          * to expose the 5700/5701 write DMA bug.
10934          */
10935         saved_dma_rwctrl = tp->dma_rwctrl;
10936         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10937         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10938
10939         while (1) {
10940                 u32 *p = buf, i;
10941
10942                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10943                         p[i] = i;
10944
10945                 /* Send the buffer to the chip. */
10946                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10947                 if (ret) {
10948                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10949                         break;
10950                 }
10951
10952 #if 0
10953                 /* validate data reached card RAM correctly. */
10954                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10955                         u32 val;
10956                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10957                         if (le32_to_cpu(val) != p[i]) {
10958                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10959                                 /* ret = -ENODEV here? */
10960                         }
10961                         p[i] = 0;
10962                 }
10963 #endif
10964                 /* Now read it back. */
10965                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10966                 if (ret) {
10967                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10968
10969                         break;
10970                 }
10971
10972                 /* Verify it. */
10973                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10974                         if (p[i] == i)
10975                                 continue;
10976
10977                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10978                             DMA_RWCTRL_WRITE_BNDRY_16) {
10979                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10980                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10981                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10982                                 break;
10983                         } else {
10984                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10985                                 ret = -ENODEV;
10986                                 goto out;
10987                         }
10988                 }
10989
10990                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10991                         /* Success. */
10992                         ret = 0;
10993                         break;
10994                 }
10995         }
10996         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10997             DMA_RWCTRL_WRITE_BNDRY_16) {
10998                 static struct pci_device_id dma_wait_state_chipsets[] = {
10999                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11000                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11001                         { },
11002                 };
11003
11004                 /* DMA test passed without adjusting DMA boundary,
11005                  * now look for chipsets that are known to expose the
11006                  * DMA bug without failing the test.
11007                  */
11008                 if (pci_dev_present(dma_wait_state_chipsets)) {
11009                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11010                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11011                 }
11012                 else
11013                         /* Safe to use the calculated DMA boundary. */
11014                         tp->dma_rwctrl = saved_dma_rwctrl;
11015
11016                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11017         }
11018
11019 out:
11020         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11021 out_nofree:
11022         return ret;
11023 }
11024
11025 static void __devinit tg3_init_link_config(struct tg3 *tp)
11026 {
11027         tp->link_config.advertising =
11028                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11029                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11030                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11031                  ADVERTISED_Autoneg | ADVERTISED_MII);
11032         tp->link_config.speed = SPEED_INVALID;
11033         tp->link_config.duplex = DUPLEX_INVALID;
11034         tp->link_config.autoneg = AUTONEG_ENABLE;
11035         tp->link_config.active_speed = SPEED_INVALID;
11036         tp->link_config.active_duplex = DUPLEX_INVALID;
11037         tp->link_config.phy_is_low_power = 0;
11038         tp->link_config.orig_speed = SPEED_INVALID;
11039         tp->link_config.orig_duplex = DUPLEX_INVALID;
11040         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11041 }
11042
11043 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11044 {
11045         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11046                 tp->bufmgr_config.mbuf_read_dma_low_water =
11047                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11048                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11049                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11050                 tp->bufmgr_config.mbuf_high_water =
11051                         DEFAULT_MB_HIGH_WATER_5705;
11052
11053                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11054                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11055                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11056                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11057                 tp->bufmgr_config.mbuf_high_water_jumbo =
11058                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11059         } else {
11060                 tp->bufmgr_config.mbuf_read_dma_low_water =
11061                         DEFAULT_MB_RDMA_LOW_WATER;
11062                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11063                         DEFAULT_MB_MACRX_LOW_WATER;
11064                 tp->bufmgr_config.mbuf_high_water =
11065                         DEFAULT_MB_HIGH_WATER;
11066
11067                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11068                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11069                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11070                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11071                 tp->bufmgr_config.mbuf_high_water_jumbo =
11072                         DEFAULT_MB_HIGH_WATER_JUMBO;
11073         }
11074
11075         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11076         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11077 }
11078
11079 static char * __devinit tg3_phy_string(struct tg3 *tp)
11080 {
11081         switch (tp->phy_id & PHY_ID_MASK) {
11082         case PHY_ID_BCM5400:    return "5400";
11083         case PHY_ID_BCM5401:    return "5401";
11084         case PHY_ID_BCM5411:    return "5411";
11085         case PHY_ID_BCM5701:    return "5701";
11086         case PHY_ID_BCM5703:    return "5703";
11087         case PHY_ID_BCM5704:    return "5704";
11088         case PHY_ID_BCM5705:    return "5705";
11089         case PHY_ID_BCM5750:    return "5750";
11090         case PHY_ID_BCM5752:    return "5752";
11091         case PHY_ID_BCM5714:    return "5714";
11092         case PHY_ID_BCM5780:    return "5780";
11093         case PHY_ID_BCM5755:    return "5755";
11094         case PHY_ID_BCM5787:    return "5787";
11095         case PHY_ID_BCM8002:    return "8002/serdes";
11096         case 0:                 return "serdes";
11097         default:                return "unknown";
11098         };
11099 }
11100
11101 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11102 {
11103         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11104                 strcpy(str, "PCI Express");
11105                 return str;
11106         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11107                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11108
11109                 strcpy(str, "PCIX:");
11110
11111                 if ((clock_ctrl == 7) ||
11112                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11113                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11114                         strcat(str, "133MHz");
11115                 else if (clock_ctrl == 0)
11116                         strcat(str, "33MHz");
11117                 else if (clock_ctrl == 2)
11118                         strcat(str, "50MHz");
11119                 else if (clock_ctrl == 4)
11120                         strcat(str, "66MHz");
11121                 else if (clock_ctrl == 6)
11122                         strcat(str, "100MHz");
11123         } else {
11124                 strcpy(str, "PCI:");
11125                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11126                         strcat(str, "66MHz");
11127                 else
11128                         strcat(str, "33MHz");
11129         }
11130         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11131                 strcat(str, ":32-bit");
11132         else
11133                 strcat(str, ":64-bit");
11134         return str;
11135 }
11136
11137 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11138 {
11139         struct pci_dev *peer;
11140         unsigned int func, devnr = tp->pdev->devfn & ~7;
11141
11142         for (func = 0; func < 8; func++) {
11143                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11144                 if (peer && peer != tp->pdev)
11145                         break;
11146                 pci_dev_put(peer);
11147         }
11148         /* 5704 can be configured in single-port mode, set peer to
11149          * tp->pdev in that case.
11150          */
11151         if (!peer) {
11152                 peer = tp->pdev;
11153                 return peer;
11154         }
11155
11156         /*
11157          * We don't need to keep the refcount elevated; there's no way
11158          * to remove one half of this device without removing the other
11159          */
11160         pci_dev_put(peer);
11161
11162         return peer;
11163 }
11164
11165 static void __devinit tg3_init_coal(struct tg3 *tp)
11166 {
11167         struct ethtool_coalesce *ec = &tp->coal;
11168
11169         memset(ec, 0, sizeof(*ec));
11170         ec->cmd = ETHTOOL_GCOALESCE;
11171         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11172         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11173         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11174         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11175         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11176         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11177         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11178         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11179         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11180
11181         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11182                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11183                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11184                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11185                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11186                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11187         }
11188
11189         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11190                 ec->rx_coalesce_usecs_irq = 0;
11191                 ec->tx_coalesce_usecs_irq = 0;
11192                 ec->stats_block_coalesce_usecs = 0;
11193         }
11194 }
11195
11196 static int __devinit tg3_init_one(struct pci_dev *pdev,
11197                                   const struct pci_device_id *ent)
11198 {
11199         static int tg3_version_printed = 0;
11200         unsigned long tg3reg_base, tg3reg_len;
11201         struct net_device *dev;
11202         struct tg3 *tp;
11203         int i, err, pm_cap;
11204         char str[40];
11205         u64 dma_mask, persist_dma_mask;
11206
11207         if (tg3_version_printed++ == 0)
11208                 printk(KERN_INFO "%s", version);
11209
11210         err = pci_enable_device(pdev);
11211         if (err) {
11212                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11213                        "aborting.\n");
11214                 return err;
11215         }
11216
11217         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11218                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11219                        "base address, aborting.\n");
11220                 err = -ENODEV;
11221                 goto err_out_disable_pdev;
11222         }
11223
11224         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11225         if (err) {
11226                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11227                        "aborting.\n");
11228                 goto err_out_disable_pdev;
11229         }
11230
11231         pci_set_master(pdev);
11232
11233         /* Find power-management capability. */
11234         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11235         if (pm_cap == 0) {
11236                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11237                        "aborting.\n");
11238                 err = -EIO;
11239                 goto err_out_free_res;
11240         }
11241
11242         tg3reg_base = pci_resource_start(pdev, 0);
11243         tg3reg_len = pci_resource_len(pdev, 0);
11244
11245         dev = alloc_etherdev(sizeof(*tp));
11246         if (!dev) {
11247                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11248                 err = -ENOMEM;
11249                 goto err_out_free_res;
11250         }
11251
11252         SET_MODULE_OWNER(dev);
11253         SET_NETDEV_DEV(dev, &pdev->dev);
11254
11255         dev->features |= NETIF_F_LLTX;
11256 #if TG3_VLAN_TAG_USED
11257         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11258         dev->vlan_rx_register = tg3_vlan_rx_register;
11259         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11260 #endif
11261
11262         tp = netdev_priv(dev);
11263         tp->pdev = pdev;
11264         tp->dev = dev;
11265         tp->pm_cap = pm_cap;
11266         tp->mac_mode = TG3_DEF_MAC_MODE;
11267         tp->rx_mode = TG3_DEF_RX_MODE;
11268         tp->tx_mode = TG3_DEF_TX_MODE;
11269         tp->mi_mode = MAC_MI_MODE_BASE;
11270         if (tg3_debug > 0)
11271                 tp->msg_enable = tg3_debug;
11272         else
11273                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11274
11275         /* The word/byte swap controls here control register access byte
11276          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11277          * setting below.
11278          */
11279         tp->misc_host_ctrl =
11280                 MISC_HOST_CTRL_MASK_PCI_INT |
11281                 MISC_HOST_CTRL_WORD_SWAP |
11282                 MISC_HOST_CTRL_INDIR_ACCESS |
11283                 MISC_HOST_CTRL_PCISTATE_RW;
11284
11285         /* The NONFRM (non-frame) byte/word swap controls take effect
11286          * on descriptor entries, anything which isn't packet data.
11287          *
11288          * The StrongARM chips on the board (one for tx, one for rx)
11289          * are running in big-endian mode.
11290          */
11291         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11292                         GRC_MODE_WSWAP_NONFRM_DATA);
11293 #ifdef __BIG_ENDIAN
11294         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11295 #endif
11296         spin_lock_init(&tp->lock);
11297         spin_lock_init(&tp->tx_lock);
11298         spin_lock_init(&tp->indirect_lock);
11299         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11300
11301         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11302         if (tp->regs == 0UL) {
11303                 printk(KERN_ERR PFX "Cannot map device registers, "
11304                        "aborting.\n");
11305                 err = -ENOMEM;
11306                 goto err_out_free_dev;
11307         }
11308
11309         tg3_init_link_config(tp);
11310
11311         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11312         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11313         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11314
11315         dev->open = tg3_open;
11316         dev->stop = tg3_close;
11317         dev->get_stats = tg3_get_stats;
11318         dev->set_multicast_list = tg3_set_rx_mode;
11319         dev->set_mac_address = tg3_set_mac_addr;
11320         dev->do_ioctl = tg3_ioctl;
11321         dev->tx_timeout = tg3_tx_timeout;
11322         dev->poll = tg3_poll;
11323         dev->ethtool_ops = &tg3_ethtool_ops;
11324         dev->weight = 64;
11325         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11326         dev->change_mtu = tg3_change_mtu;
11327         dev->irq = pdev->irq;
11328 #ifdef CONFIG_NET_POLL_CONTROLLER
11329         dev->poll_controller = tg3_poll_controller;
11330 #endif
11331
11332         err = tg3_get_invariants(tp);
11333         if (err) {
11334                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11335                        "aborting.\n");
11336                 goto err_out_iounmap;
11337         }
11338
11339         /* The EPB bridge inside 5714, 5715, and 5780 and any
11340          * device behind the EPB cannot support DMA addresses > 40-bit.
11341          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11342          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11343          * do DMA address check in tg3_start_xmit().
11344          */
11345         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11346                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11347         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11348                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11349 #ifdef CONFIG_HIGHMEM
11350                 dma_mask = DMA_64BIT_MASK;
11351 #endif
11352         } else
11353                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11354
11355         /* Configure DMA attributes. */
11356         if (dma_mask > DMA_32BIT_MASK) {
11357                 err = pci_set_dma_mask(pdev, dma_mask);
11358                 if (!err) {
11359                         dev->features |= NETIF_F_HIGHDMA;
11360                         err = pci_set_consistent_dma_mask(pdev,
11361                                                           persist_dma_mask);
11362                         if (err < 0) {
11363                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11364                                        "DMA for consistent allocations\n");
11365                                 goto err_out_iounmap;
11366                         }
11367                 }
11368         }
11369         if (err || dma_mask == DMA_32BIT_MASK) {
11370                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11371                 if (err) {
11372                         printk(KERN_ERR PFX "No usable DMA configuration, "
11373                                "aborting.\n");
11374                         goto err_out_iounmap;
11375                 }
11376         }
11377
11378         tg3_init_bufmgr_config(tp);
11379
11380 #if TG3_TSO_SUPPORT != 0
11381         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11382                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11383         }
11384         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11385             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11386             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11387             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11388                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11389         } else {
11390                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11391         }
11392
11393         /* TSO is on by default on chips that support hardware TSO.
11394          * Firmware TSO on older chips gives lower performance, so it
11395          * is off by default, but can be enabled using ethtool.
11396          */
11397         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11398                 dev->features |= NETIF_F_TSO;
11399
11400 #endif
11401
11402         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11403             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11404             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11405                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11406                 tp->rx_pending = 63;
11407         }
11408
11409         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11410             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11411                 tp->pdev_peer = tg3_find_peer(tp);
11412
11413         err = tg3_get_device_address(tp);
11414         if (err) {
11415                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11416                        "aborting.\n");
11417                 goto err_out_iounmap;
11418         }
11419
11420         /*
11421          * Reset chip in case UNDI or EFI driver did not shutdown
11422          * DMA self test will enable WDMAC and we'll see (spurious)
11423          * pending DMA on the PCI bus at that point.
11424          */
11425         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11426             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11427                 pci_save_state(tp->pdev);
11428                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11429                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11430         }
11431
11432         err = tg3_test_dma(tp);
11433         if (err) {
11434                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11435                 goto err_out_iounmap;
11436         }
11437
11438         /* Tigon3 can do ipv4 only... and some chips have buggy
11439          * checksumming.
11440          */
11441         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11442                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11443                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11444                         dev->features |= NETIF_F_HW_CSUM;
11445                 else
11446                         dev->features |= NETIF_F_IP_CSUM;
11447                 dev->features |= NETIF_F_SG;
11448                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11449         } else
11450                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11451
11452         /* flow control autonegotiation is default behavior */
11453         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11454
11455         tg3_init_coal(tp);
11456
11457         /* Now that we have fully setup the chip, save away a snapshot
11458          * of the PCI config space.  We need to restore this after
11459          * GRC_MISC_CFG core clock resets and some resume events.
11460          */
11461         pci_save_state(tp->pdev);
11462
11463         err = register_netdev(dev);
11464         if (err) {
11465                 printk(KERN_ERR PFX "Cannot register net device, "
11466                        "aborting.\n");
11467                 goto err_out_iounmap;
11468         }
11469
11470         pci_set_drvdata(pdev, dev);
11471
11472         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11473                dev->name,
11474                tp->board_part_number,
11475                tp->pci_chip_rev_id,
11476                tg3_phy_string(tp),
11477                tg3_bus_string(tp, str),
11478                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11479
11480         for (i = 0; i < 6; i++)
11481                 printk("%2.2x%c", dev->dev_addr[i],
11482                        i == 5 ? '\n' : ':');
11483
11484         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11485                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11486                "TSOcap[%d] \n",
11487                dev->name,
11488                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11489                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11490                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11491                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11492                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11493                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11494                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11495         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11496                dev->name, tp->dma_rwctrl,
11497                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11498                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11499
11500         netif_carrier_off(tp->dev);
11501
11502         return 0;
11503
11504 err_out_iounmap:
11505         if (tp->regs) {
11506                 iounmap(tp->regs);
11507                 tp->regs = NULL;
11508         }
11509
11510 err_out_free_dev:
11511         free_netdev(dev);
11512
11513 err_out_free_res:
11514         pci_release_regions(pdev);
11515
11516 err_out_disable_pdev:
11517         pci_disable_device(pdev);
11518         pci_set_drvdata(pdev, NULL);
11519         return err;
11520 }
11521
11522 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11523 {
11524         struct net_device *dev = pci_get_drvdata(pdev);
11525
11526         if (dev) {
11527                 struct tg3 *tp = netdev_priv(dev);
11528
11529                 flush_scheduled_work();
11530                 unregister_netdev(dev);
11531                 if (tp->regs) {
11532                         iounmap(tp->regs);
11533                         tp->regs = NULL;
11534                 }
11535                 free_netdev(dev);
11536                 pci_release_regions(pdev);
11537                 pci_disable_device(pdev);
11538                 pci_set_drvdata(pdev, NULL);
11539         }
11540 }
11541
11542 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11543 {
11544         struct net_device *dev = pci_get_drvdata(pdev);
11545         struct tg3 *tp = netdev_priv(dev);
11546         int err;
11547
11548         if (!netif_running(dev))
11549                 return 0;
11550
11551         flush_scheduled_work();
11552         tg3_netif_stop(tp);
11553
11554         del_timer_sync(&tp->timer);
11555
11556         tg3_full_lock(tp, 1);
11557         tg3_disable_ints(tp);
11558         tg3_full_unlock(tp);
11559
11560         netif_device_detach(dev);
11561
11562         tg3_full_lock(tp, 0);
11563         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11564         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11565         tg3_full_unlock(tp);
11566
11567         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11568         if (err) {
11569                 tg3_full_lock(tp, 0);
11570
11571                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11572                 tg3_init_hw(tp);
11573
11574                 tp->timer.expires = jiffies + tp->timer_offset;
11575                 add_timer(&tp->timer);
11576
11577                 netif_device_attach(dev);
11578                 tg3_netif_start(tp);
11579
11580                 tg3_full_unlock(tp);
11581         }
11582
11583         return err;
11584 }
11585
11586 static int tg3_resume(struct pci_dev *pdev)
11587 {
11588         struct net_device *dev = pci_get_drvdata(pdev);
11589         struct tg3 *tp = netdev_priv(dev);
11590         int err;
11591
11592         if (!netif_running(dev))
11593                 return 0;
11594
11595         pci_restore_state(tp->pdev);
11596
11597         err = tg3_set_power_state(tp, PCI_D0);
11598         if (err)
11599                 return err;
11600
11601         netif_device_attach(dev);
11602
11603         tg3_full_lock(tp, 0);
11604
11605         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11606         tg3_init_hw(tp);
11607
11608         tp->timer.expires = jiffies + tp->timer_offset;
11609         add_timer(&tp->timer);
11610
11611         tg3_netif_start(tp);
11612
11613         tg3_full_unlock(tp);
11614
11615         return 0;
11616 }
11617
11618 static struct pci_driver tg3_driver = {
11619         .name           = DRV_MODULE_NAME,
11620         .id_table       = tg3_pci_tbl,
11621         .probe          = tg3_init_one,
11622         .remove         = __devexit_p(tg3_remove_one),
11623         .suspend        = tg3_suspend,
11624         .resume         = tg3_resume
11625 };
11626
11627 static int __init tg3_init(void)
11628 {
11629         return pci_module_init(&tg3_driver);
11630 }
11631
11632 static void __exit tg3_cleanup(void)
11633 {
11634         pci_unregister_driver(&tg3_driver);
11635 }
11636
11637 module_init(tg3_init);
11638 module_exit(tg3_cleanup);