/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */

#include <linux/config.h>

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT 1
#else
#define TG3_TSO_SUPPORT 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.55"
#define DRV_MODULE_RELDATE      "Mar 27, 2006"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define TX_BUFFS_AVAIL(TP)                                              \
        ((TP)->tx_pending -                                             \
         (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
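/* Because TG3_TX_RING_SIZE is a power of two (512), '& (TG3_TX_RING_SIZE - 1)'
 * above is equivalent to '% TG3_TX_RING_SIZE'; e.g. NEXT_TX(511) wraps back
 * to 0.  TX_BUFFS_AVAIL() is the configured tx_pending budget minus the
 * number of descriptors currently in flight (producer minus consumer,
 * taken modulo the ring size).
 */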

#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
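/* With the default 512-entry ring this works out to 128 descriptors: the
 * queue is only woken once at least a quarter of the ring is free again.
 */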

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return (readl(tp->regs + off));
}

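/* Indirect register access: instead of a memory-mapped write, the target
 * offset is loaded into the TG3PCI_REG_BASE_ADDR window in PCI config space
 * and the value is moved through TG3PCI_REG_DATA.  indirect_lock serializes
 * the two-step window/data sequence against other users.
 */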
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

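/* TX mailbox writes: chips with the TXD_MBOX_HWBUG flag need the doorbell
 * value written twice, and chips that may reorder mailbox writes get a
 * read-back to flush the posted write before we continue.
 */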
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)

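/* NIC SRAM is reached through a movable window: TG3PCI_MEM_WIN_BASE_ADDR
 * selects the SRAM offset and TG3PCI_MEM_WIN_DATA carries the data, either
 * via MMIO or via PCI config space when register access is indirect.  The
 * window base is always returned to zero afterwards.
 */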
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->write32 != tg3_write_indirect_reg32) {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->write32 != tg3_write_indirect_reg32) {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

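/* Interrupts are masked in two places: the MASK_PCI_INT bit in
 * TG3PCI_MISC_HOST_CTRL gates the INTA/MSI line, and writing 1 to interrupt
 * mailbox 0 tells the chip the host is busy.  Enabling reverses both and
 * writes back the last status tag so only newer events re-interrupt.
 */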
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tp->tx_cons ||
            sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

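/* MII management access: a read/write frame (PHY address, register, opcode
 * and data) is written to MAC_MI_COM and the MI_COM_BUSY bit is then polled,
 * 10 usec per iteration for up to PHY_BUSY_LOOPS attempts (roughly 50 ms),
 * before giving up with -EBUSY.  Hardware auto-polling is paused around the
 * transaction so it cannot collide with our frame.
 */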
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

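/* Judging by the TG3_FLG2_NO_ETH_WIRE_SPEED flag name, this enables the
 * Broadcom "ethernet@wirespeed" (automatic speed downshift) feature: a
 * read-modify-write of an auxiliary control shadow register selected by the
 * 0x7007 write, setting bits 15 and 4, unless the chip is marked as not
 * supporting it.
 */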
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit <= 0)
                return -EBUSY;

        return 0;
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                if (!tg3_readphy(tp, 0x16, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
        }
        if (limit <= 0)
                return -EBUSY;

        return 0;
}

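/* DSP self-test used by the 5703/4/5 PHY workaround below: each of the four
 * channels gets a known test pattern written through the DSP address/data
 * ports, the macro engine is kicked via register 0x16, and the pattern is
 * read back and compared.  A mismatch or macro timeout requests another PHY
 * reset via *resetp.
 */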
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

out:
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        tg3_phy_set_wirespeed(tp);
        return 0;
}

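/* Switches the GRC GPIOs that control the board's auxiliary (Vaux) power.
 * On dual-port 5704/5714 boards the peer function's WOL/ASF needs are
 * checked as well before the GPIOs are toggled, so one port cannot cut
 * power that the other still relies on.
 */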
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
                return;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);

static void tg3_power_down_phy(struct tg3 *tp)
{
        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
                return;
        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

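/* Moves the chip between PCI power states.  D0 just clears the PM state
 * bits and switches away from Vaux; the sleep states (D1/D2/D3hot) instead
 * drop the copper link to 10/half autoneg, optionally arm magic-packet WOL
 * in the MAC, gate or slow the core clocks, power down the PHY when neither
 * WOL nor ASF needs it, and finally write the new state to PCI_PM_CTRL.
 */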
1175 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1176 {
1177         u32 misc_host_ctrl;
1178         u16 power_control, power_caps;
1179         int pm = tp->pm_cap;
1180
1181         /* Make sure register accesses (indirect or otherwise)
1182          * will function correctly.
1183          */
1184         pci_write_config_dword(tp->pdev,
1185                                TG3PCI_MISC_HOST_CTRL,
1186                                tp->misc_host_ctrl);
1187
1188         pci_read_config_word(tp->pdev,
1189                              pm + PCI_PM_CTRL,
1190                              &power_control);
1191         power_control |= PCI_PM_CTRL_PME_STATUS;
1192         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1193         switch (state) {
1194         case PCI_D0:
1195                 power_control |= 0;
1196                 pci_write_config_word(tp->pdev,
1197                                       pm + PCI_PM_CTRL,
1198                                       power_control);
1199                 udelay(100);    /* Delay after power state change */
1200
1201                 /* Switch out of Vaux if it is not a LOM */
1202                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1203                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1204
1205                 return 0;
1206
1207         case PCI_D1:
1208                 power_control |= 1;
1209                 break;
1210
1211         case PCI_D2:
1212                 power_control |= 2;
1213                 break;
1214
1215         case PCI_D3hot:
1216                 power_control |= 3;
1217                 break;
1218
1219         default:
1220                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1221                        "requested.\n",
1222                        tp->dev->name, state);
1223                 return -EINVAL;
1224         };
1225
1226         power_control |= PCI_PM_CTRL_PME_ENABLE;
1227
1228         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1229         tw32(TG3PCI_MISC_HOST_CTRL,
1230              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1231
1232         if (tp->link_config.phy_is_low_power == 0) {
1233                 tp->link_config.phy_is_low_power = 1;
1234                 tp->link_config.orig_speed = tp->link_config.speed;
1235                 tp->link_config.orig_duplex = tp->link_config.duplex;
1236                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1237         }
1238
1239         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1240                 tp->link_config.speed = SPEED_10;
1241                 tp->link_config.duplex = DUPLEX_HALF;
1242                 tp->link_config.autoneg = AUTONEG_ENABLE;
1243                 tg3_setup_phy(tp, 0);
1244         }
1245
1246         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1247                 int i;
1248                 u32 val;
1249
1250                 for (i = 0; i < 200; i++) {
1251                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1252                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1253                                 break;
1254                         msleep(1);
1255                 }
1256         }
1257         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1258                                              WOL_DRV_STATE_SHUTDOWN |
1259                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1260
1261         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1262
1263         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1264                 u32 mac_mode;
1265
1266                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1267                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1268                         udelay(40);
1269
1270                         mac_mode = MAC_MODE_PORT_MODE_MII;
1271
1272                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1273                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1274                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1275                 } else {
1276                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1277                 }
1278
1279                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1280                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1281
1282                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1283                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1284                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1285
1286                 tw32_f(MAC_MODE, mac_mode);
1287                 udelay(100);
1288
1289                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1290                 udelay(10);
1291         }
1292
1293         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1294             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1295              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1296                 u32 base_val;
1297
1298                 base_val = tp->pci_clock_ctrl;
1299                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1300                              CLOCK_CTRL_TXCLK_DISABLE);
1301
1302                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1303                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1304         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1305                 /* do nothing */
1306         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1307                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1308                 u32 newbits1, newbits2;
1309
1310                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1311                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1312                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1313                                     CLOCK_CTRL_TXCLK_DISABLE |
1314                                     CLOCK_CTRL_ALTCLK);
1315                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1316                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1317                         newbits1 = CLOCK_CTRL_625_CORE;
1318                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1319                 } else {
1320                         newbits1 = CLOCK_CTRL_ALTCLK;
1321                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1322                 }
1323
1324                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1325                             40);
1326
1327                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1328                             40);
1329
1330                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1331                         u32 newbits3;
1332
1333                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1334                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1335                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1336                                             CLOCK_CTRL_TXCLK_DISABLE |
1337                                             CLOCK_CTRL_44MHZ_CORE);
1338                         } else {
1339                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1340                         }
1341
1342                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1343                                     tp->pci_clock_ctrl | newbits3, 40);
1344                 }
1345         }
1346
1347         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1348             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1349                 /* Turn off the PHY */
1350                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1351                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1352                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1353                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1354                         tg3_power_down_phy(tp);
1355                 }
1356         }
1357
1358         tg3_frob_aux_power(tp);
1359
1360         /* Workaround for unstable PLL clock */
1361         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1362             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1363                 u32 val = tr32(0x7d00);
1364
1365                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1366                 tw32(0x7d00, val);
1367                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1368                         int err;
1369
1370                         err = tg3_nvram_lock(tp);
1371                         tg3_halt_cpu(tp, RX_CPU_BASE);
1372                         if (!err)
1373                                 tg3_nvram_unlock(tp);
1374                 }
1375         }
1376
1377         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1378
1379         /* Finally, set the new power state. */
1380         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1381         udelay(100);    /* Delay after power state change */
1382
1383         return 0;
1384 }
1385
1386 static void tg3_link_report(struct tg3 *tp)
1387 {
1388         if (!netif_carrier_ok(tp->dev)) {
1389                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1390         } else {
1391                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1392                        tp->dev->name,
1393                        (tp->link_config.active_speed == SPEED_1000 ?
1394                         1000 :
1395                         (tp->link_config.active_speed == SPEED_100 ?
1396                          100 : 10)),
1397                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1398                         "full" : "half"));
1399
1400                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1401                        "%s for RX.\n",
1402                        tp->dev->name,
1403                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1404                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1405         }
1406 }
1407
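/* Resolve TX/RX pause settings from the local and link-partner
 * advertisements (translating 1000BASE-X pause bits to their
 * 1000BASE-T equivalents for MII SERDES devices when pause autoneg
 * is enabled), then touch MAC_RX_MODE/MAC_TX_MODE only if the
 * resolved configuration actually changed.
 */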
1408 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1409 {
1410         u32 new_tg3_flags = 0;
1411         u32 old_rx_mode = tp->rx_mode;
1412         u32 old_tx_mode = tp->tx_mode;
1413
1414         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1415
1416                 /* Convert 1000BaseX flow control bits to 1000BaseT
1417                  * bits before resolving flow control.
1418                  */
1419                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1420                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1421                                        ADVERTISE_PAUSE_ASYM);
1422                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1423
1424                         if (local_adv & ADVERTISE_1000XPAUSE)
1425                                 local_adv |= ADVERTISE_PAUSE_CAP;
1426                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1427                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1428                         if (remote_adv & LPA_1000XPAUSE)
1429                                 remote_adv |= LPA_PAUSE_CAP;
1430                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1431                                 remote_adv |= LPA_PAUSE_ASYM;
1432                 }
1433
1434                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1435                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1436                                 if (remote_adv & LPA_PAUSE_CAP)
1437                                         new_tg3_flags |=
1438                                                 (TG3_FLAG_RX_PAUSE |
1439                                                 TG3_FLAG_TX_PAUSE);
1440                                 else if (remote_adv & LPA_PAUSE_ASYM)
1441                                         new_tg3_flags |=
1442                                                 (TG3_FLAG_RX_PAUSE);
1443                         } else {
1444                                 if (remote_adv & LPA_PAUSE_CAP)
1445                                         new_tg3_flags |=
1446                                                 (TG3_FLAG_RX_PAUSE |
1447                                                 TG3_FLAG_TX_PAUSE);
1448                         }
1449                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1450                         if ((remote_adv & LPA_PAUSE_CAP) &&
1451                         (remote_adv & LPA_PAUSE_ASYM))
1452                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1453                 }
1454
1455                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1456                 tp->tg3_flags |= new_tg3_flags;
1457         } else {
1458                 new_tg3_flags = tp->tg3_flags;
1459         }
1460
1461         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1462                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1463         else
1464                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1465
1466         if (old_rx_mode != tp->rx_mode) {
1467                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1468         }
1469
1470         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1471                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1472         else
1473                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1474
1475         if (old_tx_mode != tp->tx_mode) {
1476                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1477         }
1478 }
1479
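/* Decode the speed/duplex field of the MII_TG3_AUX_STAT register. */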
1480 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1481 {
1482         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1483         case MII_TG3_AUX_STAT_10HALF:
1484                 *speed = SPEED_10;
1485                 *duplex = DUPLEX_HALF;
1486                 break;
1487
1488         case MII_TG3_AUX_STAT_10FULL:
1489                 *speed = SPEED_10;
1490                 *duplex = DUPLEX_FULL;
1491                 break;
1492
1493         case MII_TG3_AUX_STAT_100HALF:
1494                 *speed = SPEED_100;
1495                 *duplex = DUPLEX_HALF;
1496                 break;
1497
1498         case MII_TG3_AUX_STAT_100FULL:
1499                 *speed = SPEED_100;
1500                 *duplex = DUPLEX_FULL;
1501                 break;
1502
1503         case MII_TG3_AUX_STAT_1000HALF:
1504                 *speed = SPEED_1000;
1505                 *duplex = DUPLEX_HALF;
1506                 break;
1507
1508         case MII_TG3_AUX_STAT_1000FULL:
1509                 *speed = SPEED_1000;
1510                 *duplex = DUPLEX_FULL;
1511                 break;
1512
1513         default:
1514                 *speed = SPEED_INVALID;
1515                 *duplex = DUPLEX_INVALID;
1516                 break;
1517         }
1518 }
1519
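/* Program the copper PHY advertisement from tp->link_config: a
 * restricted 10/100 advertisement in low power mode, the full
 * 10/100/1000 set when no specific speed is requested, or a single
 * forced mode otherwise, then restart autoneg or force BMCR to match.
 */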
1520 static void tg3_phy_copper_begin(struct tg3 *tp)
1521 {
1522         u32 new_adv;
1523         int i;
1524
1525         if (tp->link_config.phy_is_low_power) {
1526                 /* Entering low power mode.  Disable gigabit and
1527                  * 100baseT advertisements.
1528                  */
1529                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1530
1531                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1532                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1533                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1534                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1535
1536                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1537         } else if (tp->link_config.speed == SPEED_INVALID) {
1538                 tp->link_config.advertising =
1539                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1540                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1541                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1542                          ADVERTISED_Autoneg | ADVERTISED_MII);
1543
1544                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1545                         tp->link_config.advertising &=
1546                                 ~(ADVERTISED_1000baseT_Half |
1547                                   ADVERTISED_1000baseT_Full);
1548
1549                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1550                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1551                         new_adv |= ADVERTISE_10HALF;
1552                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1553                         new_adv |= ADVERTISE_10FULL;
1554                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1555                         new_adv |= ADVERTISE_100HALF;
1556                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1557                         new_adv |= ADVERTISE_100FULL;
1558                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1559
1560                 if (tp->link_config.advertising &
1561                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1562                         new_adv = 0;
1563                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1564                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1565                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1566                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1567                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1568                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1569                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1570                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1571                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1572                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1573                 } else {
1574                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1575                 }
1576         } else {
1577                 /* Asking for a specific link mode. */
1578                 if (tp->link_config.speed == SPEED_1000) {
1579                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1580                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1581
1582                         if (tp->link_config.duplex == DUPLEX_FULL)
1583                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1584                         else
1585                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1586                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1587                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1588                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1589                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1590                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1591                 } else {
1592                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1593
1594                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1595                         if (tp->link_config.speed == SPEED_100) {
1596                                 if (tp->link_config.duplex == DUPLEX_FULL)
1597                                         new_adv |= ADVERTISE_100FULL;
1598                                 else
1599                                         new_adv |= ADVERTISE_100HALF;
1600                         } else {
1601                                 if (tp->link_config.duplex == DUPLEX_FULL)
1602                                         new_adv |= ADVERTISE_10FULL;
1603                                 else
1604                                         new_adv |= ADVERTISE_10HALF;
1605                         }
1606                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1607                 }
1608         }
1609
1610         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1611             tp->link_config.speed != SPEED_INVALID) {
1612                 u32 bmcr, orig_bmcr;
1613
1614                 tp->link_config.active_speed = tp->link_config.speed;
1615                 tp->link_config.active_duplex = tp->link_config.duplex;
1616
1617                 bmcr = 0;
1618                 switch (tp->link_config.speed) {
1619                 default:
1620                 case SPEED_10:
1621                         break;
1622
1623                 case SPEED_100:
1624                         bmcr |= BMCR_SPEED100;
1625                         break;
1626
1627                 case SPEED_1000:
1628                         bmcr |= TG3_BMCR_SPEED1000;
1629                         break;
1630                 }
1631
1632                 if (tp->link_config.duplex == DUPLEX_FULL)
1633                         bmcr |= BMCR_FULLDPLX;
1634
1635                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1636                     (bmcr != orig_bmcr)) {
1637                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1638                         for (i = 0; i < 1500; i++) {
1639                                 u32 tmp;
1640
1641                                 udelay(10);
1642                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1643                                     tg3_readphy(tp, MII_BMSR, &tmp))
1644                                         continue;
1645                                 if (!(tmp & BMSR_LSTATUS)) {
1646                                         udelay(40);
1647                                         break;
1648                                 }
1649                         }
1650                         tg3_writephy(tp, MII_BMCR, bmcr);
1651                         udelay(40);
1652                 }
1653         } else {
1654                 tg3_writephy(tp, MII_BMCR,
1655                              BMCR_ANENABLE | BMCR_ANRESTART);
1656         }
1657 }
1658
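/* BCM5401 PHY DSP setup: disable tap power management, set the
 * extended packet length bit and load the DSP coefficients below.
 */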
1659 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1660 {
1661         int err;
1662
1663         /* Turn off tap power management and set
1664          * the extended packet length bit. */
1665         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1666
1667         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1668         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1669
1670         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1671         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1672
1673         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1674         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1675
1676         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1677         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1678
1679         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1680         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1681
1682         udelay(40);
1683
1684         return err;
1685 }
1686
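/* Return 1 if the PHY currently advertises every 10/100 mode (and
 * both 1000BASE-T modes unless the device is 10/100 only), 0 if not.
 */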
1687 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1688 {
1689         u32 adv_reg, all_mask;
1690
1691         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1692                 return 0;
1693
1694         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1695                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1696         if ((adv_reg & all_mask) != all_mask)
1697                 return 0;
1698         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1699                 u32 tg3_ctrl;
1700
1701                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1702                         return 0;
1703
1704                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1705                             MII_TG3_CTRL_ADV_1000_FULL);
1706                 if ((tg3_ctrl & all_mask) != all_mask)
1707                         return 0;
1708         }
1709         return 1;
1710 }
1711
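/* Bring up or re-check the link on a copper PHY: reset the PHY when
 * needed, read the negotiated speed/duplex from BMSR/AUX_STAT,
 * resolve flow control and program MAC_MODE to match, reporting any
 * carrier change at the end.
 */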
1712 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1713 {
1714         int current_link_up;
1715         u32 bmsr, dummy;
1716         u16 current_speed;
1717         u8 current_duplex;
1718         int i, err;
1719
1720         tw32(MAC_EVENT, 0);
1721
1722         tw32_f(MAC_STATUS,
1723              (MAC_STATUS_SYNC_CHANGED |
1724               MAC_STATUS_CFG_CHANGED |
1725               MAC_STATUS_MI_COMPLETION |
1726               MAC_STATUS_LNKSTATE_CHANGED));
1727         udelay(40);
1728
1729         tp->mi_mode = MAC_MI_MODE_BASE;
1730         tw32_f(MAC_MI_MODE, tp->mi_mode);
1731         udelay(80);
1732
1733         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1734
1735         /* Some third-party PHYs need to be reset on link going
1736          * down.
1737          */
1738         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1739              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1740              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1741             netif_carrier_ok(tp->dev)) {
1742                 tg3_readphy(tp, MII_BMSR, &bmsr);
1743                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1744                     !(bmsr & BMSR_LSTATUS))
1745                         force_reset = 1;
1746         }
1747         if (force_reset)
1748                 tg3_phy_reset(tp);
1749
1750         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1751                 tg3_readphy(tp, MII_BMSR, &bmsr);
1752                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1753                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1754                         bmsr = 0;
1755
1756                 if (!(bmsr & BMSR_LSTATUS)) {
1757                         err = tg3_init_5401phy_dsp(tp);
1758                         if (err)
1759                                 return err;
1760
1761                         tg3_readphy(tp, MII_BMSR, &bmsr);
1762                         for (i = 0; i < 1000; i++) {
1763                                 udelay(10);
1764                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1765                                     (bmsr & BMSR_LSTATUS)) {
1766                                         udelay(40);
1767                                         break;
1768                                 }
1769                         }
1770
1771                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1772                             !(bmsr & BMSR_LSTATUS) &&
1773                             tp->link_config.active_speed == SPEED_1000) {
1774                                 err = tg3_phy_reset(tp);
1775                                 if (!err)
1776                                         err = tg3_init_5401phy_dsp(tp);
1777                                 if (err)
1778                                         return err;
1779                         }
1780                 }
1781         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1782                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1783                 /* 5701 {A0,B0} CRC bug workaround */
1784                 tg3_writephy(tp, 0x15, 0x0a75);
1785                 tg3_writephy(tp, 0x1c, 0x8c68);
1786                 tg3_writephy(tp, 0x1c, 0x8d68);
1787                 tg3_writephy(tp, 0x1c, 0x8c68);
1788         }
1789
1790         /* Clear pending interrupts... */
1791         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1792         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1793
1794         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1795                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1796         else
1797                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1798
1799         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1800             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1801                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1802                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1803                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1804                 else
1805                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1806         }
1807
1808         current_link_up = 0;
1809         current_speed = SPEED_INVALID;
1810         current_duplex = DUPLEX_INVALID;
1811
1812         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1813                 u32 val;
1814
1815                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1816                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1817                 if (!(val & (1 << 10))) {
1818                         val |= (1 << 10);
1819                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1820                         goto relink;
1821                 }
1822         }
1823
1824         bmsr = 0;
1825         for (i = 0; i < 100; i++) {
1826                 tg3_readphy(tp, MII_BMSR, &bmsr);
1827                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1828                     (bmsr & BMSR_LSTATUS))
1829                         break;
1830                 udelay(40);
1831         }
1832
1833         if (bmsr & BMSR_LSTATUS) {
1834                 u32 aux_stat, bmcr;
1835
1836                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1837                 for (i = 0; i < 2000; i++) {
1838                         udelay(10);
1839                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1840                             aux_stat)
1841                                 break;
1842                 }
1843
1844                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1845                                              &current_speed,
1846                                              &current_duplex);
1847
1848                 bmcr = 0;
1849                 for (i = 0; i < 200; i++) {
1850                         tg3_readphy(tp, MII_BMCR, &bmcr);
1851                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1852                                 continue;
1853                         if (bmcr && bmcr != 0x7fff)
1854                                 break;
1855                         udelay(10);
1856                 }
1857
1858                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1859                         if (bmcr & BMCR_ANENABLE) {
1860                                 current_link_up = 1;
1861
1862                                 /* Force autoneg restart if we are exiting
1863                                  * low power mode.
1864                                  */
1865                                 if (!tg3_copper_is_advertising_all(tp))
1866                                         current_link_up = 0;
1867                         } else {
1868                                 current_link_up = 0;
1869                         }
1870                 } else {
1871                         if (!(bmcr & BMCR_ANENABLE) &&
1872                             tp->link_config.speed == current_speed &&
1873                             tp->link_config.duplex == current_duplex) {
1874                                 current_link_up = 1;
1875                         } else {
1876                                 current_link_up = 0;
1877                         }
1878                 }
1879
1880                 tp->link_config.active_speed = current_speed;
1881                 tp->link_config.active_duplex = current_duplex;
1882         }
1883
1884         if (current_link_up == 1 &&
1885             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1886             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1887                 u32 local_adv, remote_adv;
1888
1889                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1890                         local_adv = 0;
1891                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1892
1893                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1894                         remote_adv = 0;
1895
1896                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1897
1898                 /* If we are not advertising full pause capability,
1899                  * something is wrong.  Bring the link down and reconfigure.
1900                  */
1901                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1902                         current_link_up = 0;
1903                 } else {
1904                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1905                 }
1906         }
1907 relink:
1908         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1909                 u32 tmp;
1910
1911                 tg3_phy_copper_begin(tp);
1912
1913                 tg3_readphy(tp, MII_BMSR, &tmp);
1914                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1915                     (tmp & BMSR_LSTATUS))
1916                         current_link_up = 1;
1917         }
1918
1919         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1920         if (current_link_up == 1) {
1921                 if (tp->link_config.active_speed == SPEED_100 ||
1922                     tp->link_config.active_speed == SPEED_10)
1923                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1924                 else
1925                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1926         } else
1927                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1928
1929         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1930         if (tp->link_config.active_duplex == DUPLEX_HALF)
1931                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1932
1933         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1935                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1936                     (current_link_up == 1 &&
1937                      tp->link_config.active_speed == SPEED_10))
1938                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1939         } else {
1940                 if (current_link_up == 1)
1941                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1942         }
1943
1944         /* ??? Without this setting Netgear GA302T PHY does not
1945          * ??? send/receive packets...
1946          */
1947         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1948             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1949                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1950                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1951                 udelay(80);
1952         }
1953
1954         tw32_f(MAC_MODE, tp->mac_mode);
1955         udelay(40);
1956
1957         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1958                 /* Polled via timer. */
1959                 tw32_f(MAC_EVENT, 0);
1960         } else {
1961                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1962         }
1963         udelay(40);
1964
1965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1966             current_link_up == 1 &&
1967             tp->link_config.active_speed == SPEED_1000 &&
1968             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1969              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1970                 udelay(120);
1971                 tw32_f(MAC_STATUS,
1972                      (MAC_STATUS_SYNC_CHANGED |
1973                       MAC_STATUS_CFG_CHANGED));
1974                 udelay(40);
1975                 tg3_write_mem(tp,
1976                               NIC_SRAM_FIRMWARE_MBOX,
1977                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1978         }
1979
1980         if (current_link_up != netif_carrier_ok(tp->dev)) {
1981                 if (current_link_up)
1982                         netif_carrier_on(tp->dev);
1983                 else
1984                         netif_carrier_off(tp->dev);
1985                 tg3_link_report(tp);
1986         }
1987
1988         return 0;
1989 }
1990
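/* State and MR_* flags for the software 1000BASE-X autonegotiation
 * state machine used on fiber devices when hardware autoneg
 * (TG3_FLG2_HW_AUTONEG) is not in use.
 */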
1991 struct tg3_fiber_aneginfo {
1992         int state;
1993 #define ANEG_STATE_UNKNOWN              0
1994 #define ANEG_STATE_AN_ENABLE            1
1995 #define ANEG_STATE_RESTART_INIT         2
1996 #define ANEG_STATE_RESTART              3
1997 #define ANEG_STATE_DISABLE_LINK_OK      4
1998 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1999 #define ANEG_STATE_ABILITY_DETECT       6
2000 #define ANEG_STATE_ACK_DETECT_INIT      7
2001 #define ANEG_STATE_ACK_DETECT           8
2002 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2003 #define ANEG_STATE_COMPLETE_ACK         10
2004 #define ANEG_STATE_IDLE_DETECT_INIT     11
2005 #define ANEG_STATE_IDLE_DETECT          12
2006 #define ANEG_STATE_LINK_OK              13
2007 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2008 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2009
2010         u32 flags;
2011 #define MR_AN_ENABLE            0x00000001
2012 #define MR_RESTART_AN           0x00000002
2013 #define MR_AN_COMPLETE          0x00000004
2014 #define MR_PAGE_RX              0x00000008
2015 #define MR_NP_LOADED            0x00000010
2016 #define MR_TOGGLE_TX            0x00000020
2017 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2018 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2019 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2020 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2021 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2022 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2023 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2024 #define MR_TOGGLE_RX            0x00002000
2025 #define MR_NP_RX                0x00004000
2026
2027 #define MR_LINK_OK              0x80000000
2028
2029         unsigned long link_time, cur_time;
2030
2031         u32 ability_match_cfg;
2032         int ability_match_count;
2033
2034         char ability_match, idle_match, ack_match;
2035
2036         u32 txconfig, rxconfig;
2037 #define ANEG_CFG_NP             0x00000080
2038 #define ANEG_CFG_ACK            0x00000040
2039 #define ANEG_CFG_RF2            0x00000020
2040 #define ANEG_CFG_RF1            0x00000010
2041 #define ANEG_CFG_PS2            0x00000001
2042 #define ANEG_CFG_PS1            0x00008000
2043 #define ANEG_CFG_HD             0x00004000
2044 #define ANEG_CFG_FD             0x00002000
2045 #define ANEG_CFG_INVAL          0x00001f06
2046
2047 };
2048 #define ANEG_OK         0
2049 #define ANEG_DONE       1
2050 #define ANEG_TIMER_ENAB 2
2051 #define ANEG_FAILED     -1
2052
2053 #define ANEG_STATE_SETTLE_TIME  10000
2054
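/* Advance the software 1000BASE-X autoneg state machine by one step.
 * Returns ANEG_OK, ANEG_TIMER_ENAB, ANEG_DONE or ANEG_FAILED; the
 * caller keeps stepping until DONE or FAILED.
 */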
2055 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2056                                    struct tg3_fiber_aneginfo *ap)
2057 {
2058         unsigned long delta;
2059         u32 rx_cfg_reg;
2060         int ret;
2061
2062         if (ap->state == ANEG_STATE_UNKNOWN) {
2063                 ap->rxconfig = 0;
2064                 ap->link_time = 0;
2065                 ap->cur_time = 0;
2066                 ap->ability_match_cfg = 0;
2067                 ap->ability_match_count = 0;
2068                 ap->ability_match = 0;
2069                 ap->idle_match = 0;
2070                 ap->ack_match = 0;
2071         }
2072         ap->cur_time++;
2073
2074         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2075                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2076
2077                 if (rx_cfg_reg != ap->ability_match_cfg) {
2078                         ap->ability_match_cfg = rx_cfg_reg;
2079                         ap->ability_match = 0;
2080                         ap->ability_match_count = 0;
2081                 } else {
2082                         if (++ap->ability_match_count > 1) {
2083                                 ap->ability_match = 1;
2084                                 ap->ability_match_cfg = rx_cfg_reg;
2085                         }
2086                 }
2087                 if (rx_cfg_reg & ANEG_CFG_ACK)
2088                         ap->ack_match = 1;
2089                 else
2090                         ap->ack_match = 0;
2091
2092                 ap->idle_match = 0;
2093         } else {
2094                 ap->idle_match = 1;
2095                 ap->ability_match_cfg = 0;
2096                 ap->ability_match_count = 0;
2097                 ap->ability_match = 0;
2098                 ap->ack_match = 0;
2099
2100                 rx_cfg_reg = 0;
2101         }
2102
2103         ap->rxconfig = rx_cfg_reg;
2104         ret = ANEG_OK;
2105
2106         switch (ap->state) {
2107         case ANEG_STATE_UNKNOWN:
2108                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2109                         ap->state = ANEG_STATE_AN_ENABLE;
2110
2111                 /* fallthru */
2112         case ANEG_STATE_AN_ENABLE:
2113                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2114                 if (ap->flags & MR_AN_ENABLE) {
2115                         ap->link_time = 0;
2116                         ap->cur_time = 0;
2117                         ap->ability_match_cfg = 0;
2118                         ap->ability_match_count = 0;
2119                         ap->ability_match = 0;
2120                         ap->idle_match = 0;
2121                         ap->ack_match = 0;
2122
2123                         ap->state = ANEG_STATE_RESTART_INIT;
2124                 } else {
2125                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2126                 }
2127                 break;
2128
2129         case ANEG_STATE_RESTART_INIT:
2130                 ap->link_time = ap->cur_time;
2131                 ap->flags &= ~(MR_NP_LOADED);
2132                 ap->txconfig = 0;
2133                 tw32(MAC_TX_AUTO_NEG, 0);
2134                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2135                 tw32_f(MAC_MODE, tp->mac_mode);
2136                 udelay(40);
2137
2138                 ret = ANEG_TIMER_ENAB;
2139                 ap->state = ANEG_STATE_RESTART;
2140
2141                 /* fallthru */
2142         case ANEG_STATE_RESTART:
2143                 delta = ap->cur_time - ap->link_time;
2144                 if (delta > ANEG_STATE_SETTLE_TIME) {
2145                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2146                 } else {
2147                         ret = ANEG_TIMER_ENAB;
2148                 }
2149                 break;
2150
2151         case ANEG_STATE_DISABLE_LINK_OK:
2152                 ret = ANEG_DONE;
2153                 break;
2154
2155         case ANEG_STATE_ABILITY_DETECT_INIT:
2156                 ap->flags &= ~(MR_TOGGLE_TX);
2157                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2158                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2159                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2160                 tw32_f(MAC_MODE, tp->mac_mode);
2161                 udelay(40);
2162
2163                 ap->state = ANEG_STATE_ABILITY_DETECT;
2164                 break;
2165
2166         case ANEG_STATE_ABILITY_DETECT:
2167                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2168                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2169                 }
2170                 break;
2171
2172         case ANEG_STATE_ACK_DETECT_INIT:
2173                 ap->txconfig |= ANEG_CFG_ACK;
2174                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2175                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2176                 tw32_f(MAC_MODE, tp->mac_mode);
2177                 udelay(40);
2178
2179                 ap->state = ANEG_STATE_ACK_DETECT;
2180
2181                 /* fallthru */
2182         case ANEG_STATE_ACK_DETECT:
2183                 if (ap->ack_match != 0) {
2184                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2185                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2186                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2187                         } else {
2188                                 ap->state = ANEG_STATE_AN_ENABLE;
2189                         }
2190                 } else if (ap->ability_match != 0 &&
2191                            ap->rxconfig == 0) {
2192                         ap->state = ANEG_STATE_AN_ENABLE;
2193                 }
2194                 break;
2195
2196         case ANEG_STATE_COMPLETE_ACK_INIT:
2197                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2198                         ret = ANEG_FAILED;
2199                         break;
2200                 }
2201                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2202                                MR_LP_ADV_HALF_DUPLEX |
2203                                MR_LP_ADV_SYM_PAUSE |
2204                                MR_LP_ADV_ASYM_PAUSE |
2205                                MR_LP_ADV_REMOTE_FAULT1 |
2206                                MR_LP_ADV_REMOTE_FAULT2 |
2207                                MR_LP_ADV_NEXT_PAGE |
2208                                MR_TOGGLE_RX |
2209                                MR_NP_RX);
2210                 if (ap->rxconfig & ANEG_CFG_FD)
2211                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2212                 if (ap->rxconfig & ANEG_CFG_HD)
2213                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2214                 if (ap->rxconfig & ANEG_CFG_PS1)
2215                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2216                 if (ap->rxconfig & ANEG_CFG_PS2)
2217                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2218                 if (ap->rxconfig & ANEG_CFG_RF1)
2219                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2220                 if (ap->rxconfig & ANEG_CFG_RF2)
2221                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2222                 if (ap->rxconfig & ANEG_CFG_NP)
2223                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2224
2225                 ap->link_time = ap->cur_time;
2226
2227                 ap->flags ^= (MR_TOGGLE_TX);
2228                 if (ap->rxconfig & 0x0008)
2229                         ap->flags |= MR_TOGGLE_RX;
2230                 if (ap->rxconfig & ANEG_CFG_NP)
2231                         ap->flags |= MR_NP_RX;
2232                 ap->flags |= MR_PAGE_RX;
2233
2234                 ap->state = ANEG_STATE_COMPLETE_ACK;
2235                 ret = ANEG_TIMER_ENAB;
2236                 break;
2237
2238         case ANEG_STATE_COMPLETE_ACK:
2239                 if (ap->ability_match != 0 &&
2240                     ap->rxconfig == 0) {
2241                         ap->state = ANEG_STATE_AN_ENABLE;
2242                         break;
2243                 }
2244                 delta = ap->cur_time - ap->link_time;
2245                 if (delta > ANEG_STATE_SETTLE_TIME) {
2246                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2247                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2248                         } else {
2249                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2250                                     !(ap->flags & MR_NP_RX)) {
2251                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2252                                 } else {
2253                                         ret = ANEG_FAILED;
2254                                 }
2255                         }
2256                 }
2257                 break;
2258
2259         case ANEG_STATE_IDLE_DETECT_INIT:
2260                 ap->link_time = ap->cur_time;
2261                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2262                 tw32_f(MAC_MODE, tp->mac_mode);
2263                 udelay(40);
2264
2265                 ap->state = ANEG_STATE_IDLE_DETECT;
2266                 ret = ANEG_TIMER_ENAB;
2267                 break;
2268
2269         case ANEG_STATE_IDLE_DETECT:
2270                 if (ap->ability_match != 0 &&
2271                     ap->rxconfig == 0) {
2272                         ap->state = ANEG_STATE_AN_ENABLE;
2273                         break;
2274                 }
2275                 delta = ap->cur_time - ap->link_time;
2276                 if (delta > ANEG_STATE_SETTLE_TIME) {
2277                         /* XXX another gem from the Broadcom driver :( */
2278                         ap->state = ANEG_STATE_LINK_OK;
2279                 }
2280                 break;
2281
2282         case ANEG_STATE_LINK_OK:
2283                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2284                 ret = ANEG_DONE;
2285                 break;
2286
2287         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2288                 /* ??? unimplemented */
2289                 break;
2290
2291         case ANEG_STATE_NEXT_PAGE_WAIT:
2292                 /* ??? unimplemented */
2293                 break;
2294
2295         default:
2296                 ret = ANEG_FAILED;
2297                 break;
2298         }
2299
2300         return ret;
2301 }
2302
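/* Run the software autoneg state machine for up to ~195 ms.  Returns
 * 1 if autonegotiation completed successfully and fills *flags with
 * the final MR_* flags.
 */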
2303 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2304 {
2305         int res = 0;
2306         struct tg3_fiber_aneginfo aninfo;
2307         int status = ANEG_FAILED;
2308         unsigned int tick;
2309         u32 tmp;
2310
2311         tw32_f(MAC_TX_AUTO_NEG, 0);
2312
2313         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2314         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2315         udelay(40);
2316
2317         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2318         udelay(40);
2319
2320         memset(&aninfo, 0, sizeof(aninfo));
2321         aninfo.flags |= MR_AN_ENABLE;
2322         aninfo.state = ANEG_STATE_UNKNOWN;
2323         aninfo.cur_time = 0;
2324         tick = 0;
2325         while (++tick < 195000) {
2326                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2327                 if (status == ANEG_DONE || status == ANEG_FAILED)
2328                         break;
2329
2330                 udelay(1);
2331         }
2332
2333         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2334         tw32_f(MAC_MODE, tp->mac_mode);
2335         udelay(40);
2336
2337         *flags = aninfo.flags;
2338
2339         if (status == ANEG_DONE &&
2340             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2341                              MR_LP_ADV_FULL_DUPLEX)))
2342                 res = 1;
2343
2344         return res;
2345 }
2346
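/* Bring the BCM8002 SERDES PHY up with the register sequence below
 * (PLL lock range, software reset, POR toggle, channel selection).
 */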
2347 static void tg3_init_bcm8002(struct tg3 *tp)
2348 {
2349         u32 mac_status = tr32(MAC_STATUS);
2350         int i;
2351
2352         /* Reset when initializing for the first time or when we have a link. */
2353         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2354             !(mac_status & MAC_STATUS_PCS_SYNCED))
2355                 return;
2356
2357         /* Set PLL lock range. */
2358         tg3_writephy(tp, 0x16, 0x8007);
2359
2360         /* SW reset */
2361         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2362
2363         /* Wait for reset to complete. */
2364         /* XXX schedule_timeout() ... */
2365         for (i = 0; i < 500; i++)
2366                 udelay(10);
2367
2368         /* Config mode; select PMA/Ch 1 regs. */
2369         tg3_writephy(tp, 0x10, 0x8411);
2370
2371         /* Enable auto-lock and comdet, select txclk for tx. */
2372         tg3_writephy(tp, 0x11, 0x0a10);
2373
2374         tg3_writephy(tp, 0x18, 0x00a0);
2375         tg3_writephy(tp, 0x16, 0x41ff);
2376
2377         /* Assert and deassert POR. */
2378         tg3_writephy(tp, 0x13, 0x0400);
2379         udelay(40);
2380         tg3_writephy(tp, 0x13, 0x0000);
2381
2382         tg3_writephy(tp, 0x11, 0x0a50);
2383         udelay(40);
2384         tg3_writephy(tp, 0x11, 0x0a10);
2385
2386         /* Wait for signal to stabilize */
2387         /* XXX schedule_timeout() ... */
2388         for (i = 0; i < 15000; i++)
2389                 udelay(10);
2390
2391         /* Deselect the channel register so we can read the PHYID
2392          * later.
2393          */
2394         tg3_writephy(tp, 0x10, 0x8011);
2395 }
2396
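/* Fiber link setup using the on-chip SG_DIG hardware autoneg block.
 * Returns nonzero if the link came up.
 */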
2397 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2398 {
2399         u32 sg_dig_ctrl, sg_dig_status;
2400         u32 serdes_cfg, expected_sg_dig_ctrl;
2401         int workaround, port_a;
2402         int current_link_up;
2403
2404         serdes_cfg = 0;
2405         expected_sg_dig_ctrl = 0;
2406         workaround = 0;
2407         port_a = 1;
2408         current_link_up = 0;
2409
2410         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2411             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2412                 workaround = 1;
2413                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2414                         port_a = 0;
2415
2416                 /* Preserve bits 0-11,13,14 for signal pre-emphasis
2417                  * and bits 20-23 for the voltage regulator. */
2418                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2419         }
2420
2421         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2422
2423         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2424                 if (sg_dig_ctrl & (1 << 31)) {
2425                         if (workaround) {
2426                                 u32 val = serdes_cfg;
2427
2428                                 if (port_a)
2429                                         val |= 0xc010000;
2430                                 else
2431                                         val |= 0x4010000;
2432                                 tw32_f(MAC_SERDES_CFG, val);
2433                         }
2434                         tw32_f(SG_DIG_CTRL, 0x01388400);
2435                 }
2436                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2437                         tg3_setup_flow_control(tp, 0, 0);
2438                         current_link_up = 1;
2439                 }
2440                 goto out;
2441         }
2442
2443         /* Want auto-negotiation.  */
2444         expected_sg_dig_ctrl = 0x81388400;
2445
2446         /* Pause capability */
2447         expected_sg_dig_ctrl |= (1 << 11);
2448
2449         /* Asymmetric pause */
2450         expected_sg_dig_ctrl |= (1 << 12);
2451
2452         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2453                 if (workaround)
2454                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2455                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2456                 udelay(5);
2457                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2458
2459                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2460         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2461                                  MAC_STATUS_SIGNAL_DET)) {
2462                 int i;
2463
2464                 /* Give time to negotiate (~200ms) */
2465                 for (i = 0; i < 40000; i++) {
2466                         sg_dig_status = tr32(SG_DIG_STATUS);
2467                         if (sg_dig_status & (0x3))
2468                                 break;
2469                         udelay(5);
2470                 }
2471                 mac_status = tr32(MAC_STATUS);
2472
2473                 if ((sg_dig_status & (1 << 1)) &&
2474                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2475                         u32 local_adv, remote_adv;
2476
2477                         local_adv = ADVERTISE_PAUSE_CAP;
2478                         remote_adv = 0;
2479                         if (sg_dig_status & (1 << 19))
2480                                 remote_adv |= LPA_PAUSE_CAP;
2481                         if (sg_dig_status & (1 << 20))
2482                                 remote_adv |= LPA_PAUSE_ASYM;
2483
2484                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2485                         current_link_up = 1;
2486                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2487                 } else if (!(sg_dig_status & (1 << 1))) {
2488                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2489                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2490                         else {
2491                                 if (workaround) {
2492                                         u32 val = serdes_cfg;
2493
2494                                         if (port_a)
2495                                                 val |= 0xc010000;
2496                                         else
2497                                                 val |= 0x4010000;
2498
2499                                         tw32_f(MAC_SERDES_CFG, val);
2500                                 }
2501
2502                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2503                                 udelay(40);
2504
2505                                 /* Link parallel detection: the link is up
2506                                  * only if we have PCS_SYNC and are not
2507                                  * receiving config code words. */
2508                                 mac_status = tr32(MAC_STATUS);
2509                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2510                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2511                                         tg3_setup_flow_control(tp, 0, 0);
2512                                         current_link_up = 1;
2513                                 }
2514                         }
2515                 }
2516         }
2517
2518 out:
2519         return current_link_up;
2520 }
2521
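/* Fiber link setup without the SG_DIG block: run the software autoneg
 * state machine, or simply force a 1000FD link when autoneg is
 * disabled.  Returns nonzero if the link came up.
 */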
2522 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2523 {
2524         int current_link_up = 0;
2525
2526         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2527                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2528                 goto out;
2529         }
2530
2531         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2532                 u32 flags;
2533                 int i;
2534
2535                 if (fiber_autoneg(tp, &flags)) {
2536                         u32 local_adv, remote_adv;
2537
2538                         local_adv = ADVERTISE_PAUSE_CAP;
2539                         remote_adv = 0;
2540                         if (flags & MR_LP_ADV_SYM_PAUSE)
2541                                 remote_adv |= LPA_PAUSE_CAP;
2542                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2543                                 remote_adv |= LPA_PAUSE_ASYM;
2544
2545                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2546
2547                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2548                         current_link_up = 1;
2549                 }
2550                 for (i = 0; i < 30; i++) {
2551                         udelay(20);
2552                         tw32_f(MAC_STATUS,
2553                                (MAC_STATUS_SYNC_CHANGED |
2554                                 MAC_STATUS_CFG_CHANGED));
2555                         udelay(40);
2556                         if ((tr32(MAC_STATUS) &
2557                              (MAC_STATUS_SYNC_CHANGED |
2558                               MAC_STATUS_CFG_CHANGED)) == 0)
2559                                 break;
2560                 }
2561
2562                 mac_status = tr32(MAC_STATUS);
2563                 if (current_link_up == 0 &&
2564                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2565                     !(mac_status & MAC_STATUS_RCVD_CFG))
2566                         current_link_up = 1;
2567         } else {
2568                 /* Forcing 1000FD link up. */
2569                 current_link_up = 1;
2570                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2571
2572                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2573                 udelay(40);
2574         }
2575
2576 out:
2577         return current_link_up;
2578 }
2579
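/* Top-level link setup for TBI (fiber) devices: choose hardware or
 * software autoneg, update MAC_MODE and the link LED, and report any
 * change in carrier, speed, duplex or pause configuration.
 */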
2580 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2581 {
2582         u32 orig_pause_cfg;
2583         u16 orig_active_speed;
2584         u8 orig_active_duplex;
2585         u32 mac_status;
2586         int current_link_up;
2587         int i;
2588
2589         orig_pause_cfg =
2590                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2591                                   TG3_FLAG_TX_PAUSE));
2592         orig_active_speed = tp->link_config.active_speed;
2593         orig_active_duplex = tp->link_config.active_duplex;
2594
2595         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2596             netif_carrier_ok(tp->dev) &&
2597             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2598                 mac_status = tr32(MAC_STATUS);
2599                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2600                                MAC_STATUS_SIGNAL_DET |
2601                                MAC_STATUS_CFG_CHANGED |
2602                                MAC_STATUS_RCVD_CFG);
2603                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2604                                    MAC_STATUS_SIGNAL_DET)) {
2605                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2606                                             MAC_STATUS_CFG_CHANGED));
2607                         return 0;
2608                 }
2609         }
2610
2611         tw32_f(MAC_TX_AUTO_NEG, 0);
2612
2613         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2614         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2615         tw32_f(MAC_MODE, tp->mac_mode);
2616         udelay(40);
2617
2618         if (tp->phy_id == PHY_ID_BCM8002)
2619                 tg3_init_bcm8002(tp);
2620
2621         /* Enable link change event even when serdes polling.  */
2622         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2623         udelay(40);
2624
2625         current_link_up = 0;
2626         mac_status = tr32(MAC_STATUS);
2627
2628         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2629                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2630         else
2631                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2632
2633         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2634         tw32_f(MAC_MODE, tp->mac_mode);
2635         udelay(40);
2636
2637         tp->hw_status->status =
2638                 (SD_STATUS_UPDATED |
2639                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2640
2641         for (i = 0; i < 100; i++) {
2642                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2643                                     MAC_STATUS_CFG_CHANGED));
2644                 udelay(5);
2645                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2646                                          MAC_STATUS_CFG_CHANGED)) == 0)
2647                         break;
2648         }
2649
2650         mac_status = tr32(MAC_STATUS);
2651         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2652                 current_link_up = 0;
2653                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2654                         tw32_f(MAC_MODE, (tp->mac_mode |
2655                                           MAC_MODE_SEND_CONFIGS));
2656                         udelay(1);
2657                         tw32_f(MAC_MODE, tp->mac_mode);
2658                 }
2659         }
2660
2661         if (current_link_up == 1) {
2662                 tp->link_config.active_speed = SPEED_1000;
2663                 tp->link_config.active_duplex = DUPLEX_FULL;
2664                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2665                                     LED_CTRL_LNKLED_OVERRIDE |
2666                                     LED_CTRL_1000MBPS_ON));
2667         } else {
2668                 tp->link_config.active_speed = SPEED_INVALID;
2669                 tp->link_config.active_duplex = DUPLEX_INVALID;
2670                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2671                                     LED_CTRL_LNKLED_OVERRIDE |
2672                                     LED_CTRL_TRAFFIC_OVERRIDE));
2673         }
2674
2675         if (current_link_up != netif_carrier_ok(tp->dev)) {
2676                 if (current_link_up)
2677                         netif_carrier_on(tp->dev);
2678                 else
2679                         netif_carrier_off(tp->dev);
2680                 tg3_link_report(tp);
2681         } else {
2682                 u32 now_pause_cfg =
2683                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2684                                          TG3_FLAG_TX_PAUSE);
2685                 if (orig_pause_cfg != now_pause_cfg ||
2686                     orig_active_speed != tp->link_config.active_speed ||
2687                     orig_active_duplex != tp->link_config.active_duplex)
2688                         tg3_link_report(tp);
2689         }
2690
2691         return 0;
2692 }
2693
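/* Link setup for fiber devices whose SERDES PHY is reached over MII;
 * the 5714 takes its link status from MAC_TX_STATUS as a special
 * case.
 */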
2694 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2695 {
2696         int current_link_up, err = 0;
2697         u32 bmsr, bmcr;
2698         u16 current_speed;
2699         u8 current_duplex;
2700
2701         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2702         tw32_f(MAC_MODE, tp->mac_mode);
2703         udelay(40);
2704
2705         tw32(MAC_EVENT, 0);
2706
2707         tw32_f(MAC_STATUS,
2708              (MAC_STATUS_SYNC_CHANGED |
2709               MAC_STATUS_CFG_CHANGED |
2710               MAC_STATUS_MI_COMPLETION |
2711               MAC_STATUS_LNKSTATE_CHANGED));
2712         udelay(40);
2713
2714         if (force_reset)
2715                 tg3_phy_reset(tp);
2716
2717         current_link_up = 0;
2718         current_speed = SPEED_INVALID;
2719         current_duplex = DUPLEX_INVALID;
2720
2721         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2722         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2723         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2724                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2725                         bmsr |= BMSR_LSTATUS;
2726                 else
2727                         bmsr &= ~BMSR_LSTATUS;
2728         }
2729
2730         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2731
2732         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2733             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2734                 /* do nothing, just check for link up at the end */
2735         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2736                 u32 adv, new_adv;
2737
2738                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2739                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2740                                   ADVERTISE_1000XPAUSE |
2741                                   ADVERTISE_1000XPSE_ASYM |
2742                                   ADVERTISE_SLCT);
2743
2744                 /* Always advertise symmetric PAUSE just like copper */
2745                 new_adv |= ADVERTISE_1000XPAUSE;
2746
2747                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2748                         new_adv |= ADVERTISE_1000XHALF;
2749                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2750                         new_adv |= ADVERTISE_1000XFULL;
2751
2752                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2753                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2754                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2755                         tg3_writephy(tp, MII_BMCR, bmcr);
2756
2757                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2758                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2759                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2760
2761                         return err;
2762                 }
2763         } else {
2764                 u32 new_bmcr;
2765
2766                 bmcr &= ~BMCR_SPEED1000;
2767                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2768
2769                 if (tp->link_config.duplex == DUPLEX_FULL)
2770                         new_bmcr |= BMCR_FULLDPLX;
2771
2772                 if (new_bmcr != bmcr) {
2773                         /* BMCR_SPEED1000 is a reserved bit that needs
2774                          * to be set on write.
2775                          */
2776                         new_bmcr |= BMCR_SPEED1000;
2777
2778                         /* Force a linkdown */
2779                         if (netif_carrier_ok(tp->dev)) {
2780                                 u32 adv;
2781
2782                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2783                                 adv &= ~(ADVERTISE_1000XFULL |
2784                                          ADVERTISE_1000XHALF |
2785                                          ADVERTISE_SLCT);
2786                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2787                                 tg3_writephy(tp, MII_BMCR, bmcr |
2788                                                            BMCR_ANRESTART |
2789                                                            BMCR_ANENABLE);
2790                                 udelay(10);
2791                                 netif_carrier_off(tp->dev);
2792                         }
2793                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2794                         bmcr = new_bmcr;
2795                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2796                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2797                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2798                             ASIC_REV_5714) {
2799                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2800                                         bmsr |= BMSR_LSTATUS;
2801                                 else
2802                                         bmsr &= ~BMSR_LSTATUS;
2803                         }
2804                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2805                 }
2806         }
2807
2808         if (bmsr & BMSR_LSTATUS) {
2809                 current_speed = SPEED_1000;
2810                 current_link_up = 1;
2811                 if (bmcr & BMCR_FULLDPLX)
2812                         current_duplex = DUPLEX_FULL;
2813                 else
2814                         current_duplex = DUPLEX_HALF;
2815
2816                 if (bmcr & BMCR_ANENABLE) {
2817                         u32 local_adv, remote_adv, common;
2818
2819                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2820                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2821                         common = local_adv & remote_adv;
2822                         if (common & (ADVERTISE_1000XHALF |
2823                                       ADVERTISE_1000XFULL)) {
2824                                 if (common & ADVERTISE_1000XFULL)
2825                                         current_duplex = DUPLEX_FULL;
2826                                 else
2827                                         current_duplex = DUPLEX_HALF;
2828
2829                                 tg3_setup_flow_control(tp, local_adv,
2830                                                        remote_adv);
2831                         }
2832                         else
2833                                 current_link_up = 0;
2834                 }
2835         }
2836
2837         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2838         if (tp->link_config.active_duplex == DUPLEX_HALF)
2839                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2840
2841         tw32_f(MAC_MODE, tp->mac_mode);
2842         udelay(40);
2843
2844         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2845
2846         tp->link_config.active_speed = current_speed;
2847         tp->link_config.active_duplex = current_duplex;
2848
2849         if (current_link_up != netif_carrier_ok(tp->dev)) {
2850                 if (current_link_up)
2851                         netif_carrier_on(tp->dev);
2852                 else {
2853                         netif_carrier_off(tp->dev);
2854                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2855                 }
2856                 tg3_link_report(tp);
2857         }
2858         return err;
2859 }
2860
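/* Parallel detection fallback for the SerDes link, based on the checks
 * below: if autoneg is enabled but the link has not come up, and the PHY
 * reports signal detect while no configuration code words are being
 * received, force 1000/full with autoneg off and mark PARALLEL_DETECT.
 * If code words later reappear while the link is up, turn autoneg back
 * on so a normal negotiation can take over.
 */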
2861 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2862 {
2863         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2864                 /* Give autoneg time to complete. */
2865                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2866                 return;
2867         }
2868         if (!netif_carrier_ok(tp->dev) &&
2869             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2870                 u32 bmcr;
2871
2872                 tg3_readphy(tp, MII_BMCR, &bmcr);
2873                 if (bmcr & BMCR_ANENABLE) {
2874                         u32 phy1, phy2;
2875
2876                         /* Select shadow register 0x1f */
2877                         tg3_writephy(tp, 0x1c, 0x7c00);
2878                         tg3_readphy(tp, 0x1c, &phy1);
2879
2880                         /* Select expansion interrupt status register */
2881                         tg3_writephy(tp, 0x17, 0x0f01);
2882                         tg3_readphy(tp, 0x15, &phy2);
2883                         tg3_readphy(tp, 0x15, &phy2);
2884
2885                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2886                                 /* We have signal detect and are not receiving
2887                                  * config code words, so the link is up by
2888                                  * parallel detection.
2889                                  */
2890
2891                                 bmcr &= ~BMCR_ANENABLE;
2892                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2893                                 tg3_writephy(tp, MII_BMCR, bmcr);
2894                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2895                         }
2896                 }
2897         }
2898         else if (netif_carrier_ok(tp->dev) &&
2899                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2900                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2901                 u32 phy2;
2902
2903                 /* Select expansion interrupt status register */
2904                 tg3_writephy(tp, 0x17, 0x0f01);
2905                 tg3_readphy(tp, 0x15, &phy2);
2906                 if (phy2 & 0x20) {
2907                         u32 bmcr;
2908
2909                         /* Config code words received, turn on autoneg. */
2910                         tg3_readphy(tp, MII_BMCR, &bmcr);
2911                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2912
2913                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2914
2915                 }
2916         }
2917 }
2918
2919 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2920 {
2921         int err;
2922
2923         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2924                 err = tg3_setup_fiber_phy(tp, force_reset);
2925         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2926                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2927         } else {
2928                 err = tg3_setup_copper_phy(tp, force_reset);
2929         }
2930
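        /* Only the slot-time field differs between the two settings below
         * (0xff for 1000/half vs. 32 otherwise); the IPG fields are the
         * same.  The larger slot time presumably accounts for half-duplex
         * gigabit carrier extension.
         */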
2931         if (tp->link_config.active_speed == SPEED_1000 &&
2932             tp->link_config.active_duplex == DUPLEX_HALF)
2933                 tw32(MAC_TX_LENGTHS,
2934                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2935                       (6 << TX_LENGTHS_IPG_SHIFT) |
2936                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2937         else
2938                 tw32(MAC_TX_LENGTHS,
2939                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2940                       (6 << TX_LENGTHS_IPG_SHIFT) |
2941                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2942
2943         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2944                 if (netif_carrier_ok(tp->dev)) {
2945                         tw32(HOSTCC_STAT_COAL_TICKS,
2946                              tp->coal.stats_block_coalesce_usecs);
2947                 } else {
2948                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2949                 }
2950         }
2951
2952         return err;
2953 }
2954
2955 /* Tigon3 never reports partial packet sends.  So we do not
2956  * need special logic to handle SKBs that have not had all
2957  * of their frags sent yet, like SunGEM does.
2958  */
2959 static void tg3_tx(struct tg3 *tp)
2960 {
2961         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2962         u32 sw_idx = tp->tx_cons;
2963
2964         while (sw_idx != hw_idx) {
2965                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2966                 struct sk_buff *skb = ri->skb;
2967                 int i;
2968
2969                 if (unlikely(skb == NULL))
2970                         BUG();
2971
2972                 pci_unmap_single(tp->pdev,
2973                                  pci_unmap_addr(ri, mapping),
2974                                  skb_headlen(skb),
2975                                  PCI_DMA_TODEVICE);
2976
2977                 ri->skb = NULL;
2978
2979                 sw_idx = NEXT_TX(sw_idx);
2980
2981                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2982                         if (unlikely(sw_idx == hw_idx))
2983                                 BUG();
2984
2985                         ri = &tp->tx_buffers[sw_idx];
2986                         if (unlikely(ri->skb != NULL))
2987                                 BUG();
2988
2989                         pci_unmap_page(tp->pdev,
2990                                        pci_unmap_addr(ri, mapping),
2991                                        skb_shinfo(skb)->frags[i].size,
2992                                        PCI_DMA_TODEVICE);
2993
2994                         sw_idx = NEXT_TX(sw_idx);
2995                 }
2996
2997                 dev_kfree_skb(skb);
2998         }
2999
3000         tp->tx_cons = sw_idx;
3001
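        /* The lockless queue-stopped test keeps the common case cheap; it
         * is repeated under tx_lock because tg3_start_xmit() stops the
         * queue while holding that lock, so only the locked re-check can
         * safely decide to wake it.
         */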
3002         if (unlikely(netif_queue_stopped(tp->dev))) {
3003                 spin_lock(&tp->tx_lock);
3004                 if (netif_queue_stopped(tp->dev) &&
3005                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3006                         netif_wake_queue(tp->dev);
3007                 spin_unlock(&tp->tx_lock);
3008         }
3009 }
3010
3011 /* Returns size of skb allocated or < 0 on error.
3012  *
3013  * We only need to fill in the address because the other members
3014  * of the RX descriptor are invariant, see tg3_init_rings.
3015  *
3016  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3017  * posting buffers we only dirty the first cache line of the RX
3018  * descriptor (containing the address).  Whereas for the RX status
3019  * buffers the cpu only reads the last cacheline of the RX descriptor
3020  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3021  */
3022 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3023                             int src_idx, u32 dest_idx_unmasked)
3024 {
3025         struct tg3_rx_buffer_desc *desc;
3026         struct ring_info *map, *src_map;
3027         struct sk_buff *skb;
3028         dma_addr_t mapping;
3029         int skb_size, dest_idx;
3030
3031         src_map = NULL;
3032         switch (opaque_key) {
3033         case RXD_OPAQUE_RING_STD:
3034                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3035                 desc = &tp->rx_std[dest_idx];
3036                 map = &tp->rx_std_buffers[dest_idx];
3037                 if (src_idx >= 0)
3038                         src_map = &tp->rx_std_buffers[src_idx];
3039                 skb_size = tp->rx_pkt_buf_sz;
3040                 break;
3041
3042         case RXD_OPAQUE_RING_JUMBO:
3043                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3044                 desc = &tp->rx_jumbo[dest_idx];
3045                 map = &tp->rx_jumbo_buffers[dest_idx];
3046                 if (src_idx >= 0)
3047                         src_map = &tp->rx_jumbo_buffers[src_idx];
3048                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3049                 break;
3050
3051         default:
3052                 return -EINVAL;
3053         }
3054
3055         /* Do not overwrite any of the map or rp information
3056          * until we are sure we can commit to a new buffer.
3057          *
3058          * Callers depend upon this behavior and assume that
3059          * we leave everything unchanged if we fail.
3060          */
3061         skb = dev_alloc_skb(skb_size);
3062         if (skb == NULL)
3063                 return -ENOMEM;
3064
3065         skb->dev = tp->dev;
3066         skb_reserve(skb, tp->rx_offset);
3067
3068         mapping = pci_map_single(tp->pdev, skb->data,
3069                                  skb_size - tp->rx_offset,
3070                                  PCI_DMA_FROMDEVICE);
3071
3072         map->skb = skb;
3073         pci_unmap_addr_set(map, mapping, mapping);
3074
3075         if (src_map != NULL)
3076                 src_map->skb = NULL;
3077
3078         desc->addr_hi = ((u64)mapping >> 32);
3079         desc->addr_lo = ((u64)mapping & 0xffffffff);
3080
3081         return skb_size;
3082 }
3083
3084 /* We only need to move over the address because the other
3085  * members of the RX descriptor are invariant.  See notes above
3086  * tg3_alloc_rx_skb for full details.
3087  */
3088 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3089                            int src_idx, u32 dest_idx_unmasked)
3090 {
3091         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3092         struct ring_info *src_map, *dest_map;
3093         int dest_idx;
3094
3095         switch (opaque_key) {
3096         case RXD_OPAQUE_RING_STD:
3097                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3098                 dest_desc = &tp->rx_std[dest_idx];
3099                 dest_map = &tp->rx_std_buffers[dest_idx];
3100                 src_desc = &tp->rx_std[src_idx];
3101                 src_map = &tp->rx_std_buffers[src_idx];
3102                 break;
3103
3104         case RXD_OPAQUE_RING_JUMBO:
3105                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3106                 dest_desc = &tp->rx_jumbo[dest_idx];
3107                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3108                 src_desc = &tp->rx_jumbo[src_idx];
3109                 src_map = &tp->rx_jumbo_buffers[src_idx];
3110                 break;
3111
3112         default:
3113                 return;
3114         }
3115
3116         dest_map->skb = src_map->skb;
3117         pci_unmap_addr_set(dest_map, mapping,
3118                            pci_unmap_addr(src_map, mapping));
3119         dest_desc->addr_hi = src_desc->addr_hi;
3120         dest_desc->addr_lo = src_desc->addr_lo;
3121
3122         src_map->skb = NULL;
3123 }
3124
3125 #if TG3_VLAN_TAG_USED
3126 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3127 {
3128         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3129 }
3130 #endif
3131
3132 /* The RX ring scheme is composed of multiple rings which post fresh
3133  * buffers to the chip, and one special ring the chip uses to report
3134  * status back to the host.
3135  *
3136  * The special ring reports the status of received packets to the
3137  * host.  The chip does not write into the original descriptor the
3138  * RX buffer was obtained from.  The chip simply takes the original
3139  * descriptor as provided by the host, updates the status and length
3140  * field, then writes this into the next status ring entry.
3141  *
3142  * Each ring the host uses to post buffers to the chip is described
3143  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3144  * it is first placed into the on-chip RAM.  When the packet's length
3145  * is known, the chip walks down the TG3_BDINFO entries to select the
3146  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
3147  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
3148  *
3149  * The "separate ring for rx status" scheme may sound queer, but it makes
3150  * sense from a cache coherency perspective.  If only the host writes
3151  * to the buffer post rings, and only the chip writes to the rx status
3152  * rings, then cache lines never move beyond shared-modified state.
3153  * If both the host and chip were to write into the same ring, cache line
3154  * eviction could occur since both entities want it in an exclusive state.
3155  */
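/* Concretely, tg3_rx() below: reads the producer index the chip left in
 * the status block, walks the return ring until the software index
 * catches up, uses each entry's opaque cookie to locate the original
 * buffer in the std or jumbo posting ring, then either hands the skb up
 * (posting a fresh replacement) or copies small packets and recycles the
 * buffer, and finally ACKs the return ring and kicks the posting-ring
 * mailboxes to refill.
 */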
3156 static int tg3_rx(struct tg3 *tp, int budget)
3157 {
3158         u32 work_mask;
3159         u32 sw_idx = tp->rx_rcb_ptr;
3160         u16 hw_idx;
3161         int received;
3162
3163         hw_idx = tp->hw_status->idx[0].rx_producer;
3164         /*
3165          * We need to order the read of hw_idx and the read of
3166          * the opaque cookie.
3167          */
3168         rmb();
3169         work_mask = 0;
3170         received = 0;
3171         while (sw_idx != hw_idx && budget > 0) {
3172                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3173                 unsigned int len;
3174                 struct sk_buff *skb;
3175                 dma_addr_t dma_addr;
3176                 u32 opaque_key, desc_idx, *post_ptr;
3177
3178                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3179                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3180                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3181                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3182                                                   mapping);
3183                         skb = tp->rx_std_buffers[desc_idx].skb;
3184                         post_ptr = &tp->rx_std_ptr;
3185                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3186                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3187                                                   mapping);
3188                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3189                         post_ptr = &tp->rx_jumbo_ptr;
3190                 }
3191                 else {
3192                         goto next_pkt_nopost;
3193                 }
3194
3195                 work_mask |= opaque_key;
3196
3197                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3198                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3199                 drop_it:
3200                         tg3_recycle_rx(tp, opaque_key,
3201                                        desc_idx, *post_ptr);
3202                 drop_it_no_recycle:
3203                         /* Other statistics kept track of by card. */
3204                         tp->net_stats.rx_dropped++;
3205                         goto next_pkt;
3206                 }
3207
3208                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3209
3210                 /* rx_offset != 2 iff this is a 5701 card running
3211                  * in PCI-X mode [see tg3_get_invariants()].
3212                  */
3213                 if (len > RX_COPY_THRESHOLD &&
3214                     tp->rx_offset == 2) {
3215                         int skb_size;
3216
3217                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3218                                                     desc_idx, *post_ptr);
3219                         if (skb_size < 0)
3220                                 goto drop_it;
3221
3222                         pci_unmap_single(tp->pdev, dma_addr,
3223                                          skb_size - tp->rx_offset,
3224                                          PCI_DMA_FROMDEVICE);
3225
3226                         skb_put(skb, len);
3227                 } else {
3228                         struct sk_buff *copy_skb;
3229
3230                         tg3_recycle_rx(tp, opaque_key,
3231                                        desc_idx, *post_ptr);
3232
3233                         copy_skb = dev_alloc_skb(len + 2);
3234                         if (copy_skb == NULL)
3235                                 goto drop_it_no_recycle;
3236
3237                         copy_skb->dev = tp->dev;
3238                         skb_reserve(copy_skb, 2);
3239                         skb_put(copy_skb, len);
3240                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3241                         memcpy(copy_skb->data, skb->data, len);
3242                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3243
3244                         /* We'll reuse the original ring buffer. */
3245                         skb = copy_skb;
3246                 }
3247
3248                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3249                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3250                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3251                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3252                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3253                 else
3254                         skb->ip_summed = CHECKSUM_NONE;
3255
3256                 skb->protocol = eth_type_trans(skb, tp->dev);
3257 #if TG3_VLAN_TAG_USED
3258                 if (tp->vlgrp != NULL &&
3259                     desc->type_flags & RXD_FLAG_VLAN) {
3260                         tg3_vlan_rx(tp, skb,
3261                                     desc->err_vlan & RXD_VLAN_MASK);
3262                 } else
3263 #endif
3264                         netif_receive_skb(skb);
3265
3266                 tp->dev->last_rx = jiffies;
3267                 received++;
3268                 budget--;
3269
3270 next_pkt:
3271                 (*post_ptr)++;
3272 next_pkt_nopost:
3273                 sw_idx++;
3274                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3275
3276                 /* Refresh hw_idx to see if there is new work */
3277                 if (sw_idx == hw_idx) {
3278                         hw_idx = tp->hw_status->idx[0].rx_producer;
3279                         rmb();
3280                 }
3281         }
3282
3283         /* ACK the status ring. */
3284         tp->rx_rcb_ptr = sw_idx;
3285         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3286
3287         /* Refill RX ring(s). */
3288         if (work_mask & RXD_OPAQUE_RING_STD) {
3289                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3290                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3291                              sw_idx);
3292         }
3293         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3294                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3295                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3296                              sw_idx);
3297         }
3298         mmiowb();
3299
3300         return received;
3301 }
3302
3303 static int tg3_poll(struct net_device *netdev, int *budget)
3304 {
3305         struct tg3 *tp = netdev_priv(netdev);
3306         struct tg3_hw_status *sblk = tp->hw_status;
3307         int done;
3308
3309         /* handle link change and other phy events */
3310         if (!(tp->tg3_flags &
3311               (TG3_FLAG_USE_LINKCHG_REG |
3312                TG3_FLAG_POLL_SERDES))) {
3313                 if (sblk->status & SD_STATUS_LINK_CHG) {
3314                         sblk->status = SD_STATUS_UPDATED |
3315                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3316                         spin_lock(&tp->lock);
3317                         tg3_setup_phy(tp, 0);
3318                         spin_unlock(&tp->lock);
3319                 }
3320         }
3321
3322         /* run TX completion thread */
3323         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3324                 tg3_tx(tp);
3325         }
3326
3327         /* run RX thread, within the bounds set by NAPI.
3328          * All RX "locking" is done by ensuring outside
3329          * code synchronizes with dev->poll()
3330          */
3331         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3332                 int orig_budget = *budget;
3333                 int work_done;
3334
3335                 if (orig_budget > netdev->quota)
3336                         orig_budget = netdev->quota;
3337
3338                 work_done = tg3_rx(tp, orig_budget);
3339
3340                 *budget -= work_done;
3341                 netdev->quota -= work_done;
3342         }
3343
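        /* With tagged status the chip stamps each status block update;
         * tg3_interrupt_tagged() compares status_tag against last_tag to
         * decide whether an interrupt carries new work, so record the tag
         * we just processed here.  The non-tagged path instead clears
         * SD_STATUS_UPDATED.
         */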
3344         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3345                 tp->last_tag = sblk->status_tag;
3346                 rmb();
3347         } else
3348                 sblk->status &= ~SD_STATUS_UPDATED;
3349
3350         /* if no more work, tell net stack and NIC we're done */
3351         done = !tg3_has_work(tp);
3352         if (done) {
3353                 netif_rx_complete(netdev);
3354                 tg3_restart_ints(tp);
3355         }
3356
3357         return (done ? 0 : 1);
3358 }
3359
3360 static void tg3_irq_quiesce(struct tg3 *tp)
3361 {
3362         BUG_ON(tp->irq_sync);
3363
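        /* Set irq_sync before waiting: the interrupt handlers test
         * tg3_irq_sync() and return without scheduling NAPI once it is
         * set.  The barrier makes the store visible before
         * synchronize_irq() waits out any handler already running.
         */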
3364         tp->irq_sync = 1;
3365         smp_mb();
3366
3367         synchronize_irq(tp->pdev->irq);
3368 }
3369
3370 static inline int tg3_irq_sync(struct tg3 *tp)
3371 {
3372         return tp->irq_sync;
3373 }
3374
3375 /* Fully shut down all tg3 driver activity elsewhere in the system.
3376  * If irq_sync is non-zero, the IRQ handler is synchronized with as
3377  * well.  This is usually only necessary when
3378  * shutting down the device.
3379  */
3380 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3381 {
3382         if (irq_sync)
3383                 tg3_irq_quiesce(tp);
3384         spin_lock_bh(&tp->lock);
3385         spin_lock(&tp->tx_lock);
3386 }
3387
3388 static inline void tg3_full_unlock(struct tg3 *tp)
3389 {
3390         spin_unlock(&tp->tx_lock);
3391         spin_unlock_bh(&tp->lock);
3392 }
3393
3394 /* One-shot MSI handler - Chip automatically disables interrupt
3395  * after sending MSI so driver doesn't have to do it.
3396  */
3397 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3398 {
3399         struct net_device *dev = dev_id;
3400         struct tg3 *tp = netdev_priv(dev);
3401
3402         prefetch(tp->hw_status);
3403         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3404
3405         if (likely(!tg3_irq_sync(tp)))
3406                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3407
3408         return IRQ_HANDLED;
3409 }
3410
3411 /* MSI ISR - No need to check for interrupt sharing and no need to
3412  * flush status block and interrupt mailbox. PCI ordering rules
3413  * guarantee that MSI will arrive after the status block.
3414  */
3415 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3416 {
3417         struct net_device *dev = dev_id;
3418         struct tg3 *tp = netdev_priv(dev);
3419
3420         prefetch(tp->hw_status);
3421         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3422         /*
3423          * Writing any value to intr-mbox-0 clears PCI INTA# and
3424          * chip-internal interrupt pending events.
3425          * Writing non-zero to intr-mbox-0 additionally tells the
3426          * NIC to stop sending us irqs, engaging "in-intr-handler"
3427          * event coalescing.
3428          */
3429         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3430         if (likely(!tg3_irq_sync(tp)))
3431                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3432
3433         return IRQ_RETVAL(1);
3434 }
3435
3436 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3437 {
3438         struct net_device *dev = dev_id;
3439         struct tg3 *tp = netdev_priv(dev);
3440         struct tg3_hw_status *sblk = tp->hw_status;
3441         unsigned int handled = 1;
3442
3443         /* In INTx mode, it is possible for the interrupt to arrive at
3444          * the CPU before the status block posted prior to the interrupt
3445          * has reached host memory.  Reading the PCI State register will
3446          * confirm whether the interrupt is ours and flush the status block.
3447          */
3448         if ((sblk->status & SD_STATUS_UPDATED) ||
3449             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3450                 /*
3451                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3452                  * chip-internal interrupt pending events.
3453          * Writing non-zero to intr-mbox-0 additionally tells the
3454                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3455                  * event coalescing.
3456                  */
3457                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3458                              0x00000001);
3459                 if (tg3_irq_sync(tp))
3460                         goto out;
3461                 sblk->status &= ~SD_STATUS_UPDATED;
3462                 if (likely(tg3_has_work(tp))) {
3463                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3464                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3465                 } else {
3466                         /* No work, shared interrupt perhaps?  re-enable
3467                          * interrupts, and flush that PCI write
3468                          */
3469                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3470                                 0x00000000);
3471                 }
3472         } else {        /* shared interrupt */
3473                 handled = 0;
3474         }
3475 out:
3476         return IRQ_RETVAL(handled);
3477 }
3478
3479 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3480 {
3481         struct net_device *dev = dev_id;
3482         struct tg3 *tp = netdev_priv(dev);
3483         struct tg3_hw_status *sblk = tp->hw_status;
3484         unsigned int handled = 1;
3485
3486         /* In INTx mode, it is possible for the interrupt to arrive at
3487          * the CPU before the status block posted prior to the interrupt
3488          * has reached host memory.  Reading the PCI State register will
3489          * confirm whether the interrupt is ours and flush the status block.
3490          */
3491         if ((sblk->status_tag != tp->last_tag) ||
3492             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3493                 /*
3494                  * writing any value to intr-mbox-0 clears PCI INTA# and
3495                  * chip-internal interrupt pending events.
3496          * writing non-zero to intr-mbox-0 additionally tells the
3497                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3498                  * event coalescing.
3499                  */
3500                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3501                              0x00000001);
3502                 if (tg3_irq_sync(tp))
3503                         goto out;
3504                 if (netif_rx_schedule_prep(dev)) {
3505                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3506                         /* Update last_tag to mark that this status has been
3507                          * seen. Because interrupt may be shared, we may be
3508                          * racing with tg3_poll(), so only update last_tag
3509                          * if tg3_poll() is not scheduled.
3510                          */
3511                         tp->last_tag = sblk->status_tag;
3512                         __netif_rx_schedule(dev);
3513                 }
3514         } else {        /* shared interrupt */
3515                 handled = 0;
3516         }
3517 out:
3518         return IRQ_RETVAL(handled);
3519 }
3520
3521 /* ISR for interrupt test */
3522 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3523                 struct pt_regs *regs)
3524 {
3525         struct net_device *dev = dev_id;
3526         struct tg3 *tp = netdev_priv(dev);
3527         struct tg3_hw_status *sblk = tp->hw_status;
3528
3529         if ((sblk->status & SD_STATUS_UPDATED) ||
3530             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3531                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3532                              0x00000001);
3533                 return IRQ_RETVAL(1);
3534         }
3535         return IRQ_RETVAL(0);
3536 }
3537
3538 static int tg3_init_hw(struct tg3 *);
3539 static int tg3_halt(struct tg3 *, int, int);
3540
3541 #ifdef CONFIG_NET_POLL_CONTROLLER
3542 static void tg3_poll_controller(struct net_device *dev)
3543 {
3544         struct tg3 *tp = netdev_priv(dev);
3545
3546         tg3_interrupt(tp->pdev->irq, dev, NULL);
3547 }
3548 #endif
3549
3550 static void tg3_reset_task(void *_data)
3551 {
3552         struct tg3 *tp = _data;
3553         unsigned int restart_timer;
3554
3555         tg3_full_lock(tp, 0);
3556         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3557
3558         if (!netif_running(tp->dev)) {
3559                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3560                 tg3_full_unlock(tp);
3561                 return;
3562         }
3563
3564         tg3_full_unlock(tp);
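        /* Drop the full lock around tg3_netif_stop(); stopping the poll
         * handler can presumably sleep, so it must not run under the
         * spinlocks.  The lock is re-taken below with irq_sync set so the
         * interrupt handler is quiesced as well.
         */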
3565
3566         tg3_netif_stop(tp);
3567
3568         tg3_full_lock(tp, 1);
3569
3570         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3571         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3572
3573         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3574         tg3_init_hw(tp);
3575
3576         tg3_netif_start(tp);
3577
3578         if (restart_timer)
3579                 mod_timer(&tp->timer, jiffies + 1);
3580
3581         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3582
3583         tg3_full_unlock(tp);
3584 }
3585
3586 static void tg3_tx_timeout(struct net_device *dev)
3587 {
3588         struct tg3 *tp = netdev_priv(dev);
3589
3590         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3591                dev->name);
3592
3593         schedule_work(&tp->reset_task);
3594 }
3595
3596 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
3597 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3598 {
3599         u32 base = (u32) mapping & 0xffffffff;
3600
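        /* 0xffffdcc0 is 0x2340 (9024) bytes below a 4GB boundary, which
         * presumably leaves room for a maximum-sized frame.  The second
         * term relies on u32 wraparound, e.g. base = 0xffffe000,
         * len = 0x3000:  base + len + 8 wraps to 0x1008 < base, so the
         * buffer straddles the boundary and the test fires.
         */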
3601         return ((base > 0xffffdcc0) &&
3602                 (base + len + 8 < base));
3603 }
3604
3605 /* Test for DMA addresses > 40-bit */
3606 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3607                                           int len)
3608 {
3609 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3610         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3611                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3612         return 0;
3613 #else
3614         return 0;
3615 #endif
3616 }
3617
3618 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3619
3620 /* Workaround 4GB and 40-bit hardware DMA bugs. */
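/* The approach: linearize the offending skb with skb_copy(), map the
 * copy and describe it with a single descriptor, then walk the entries
 * already set up for the original skb and unmap them, attaching the new
 * skb to the first entry.  If the copy cannot be allocated or the new
 * mapping still crosses a 4GB boundary, the caller silently drops the
 * packet (ret = -1).
 */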
3621 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3622                                        u32 last_plus_one, u32 *start,
3623                                        u32 base_flags, u32 mss)
3624 {
3625         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3626         dma_addr_t new_addr = 0;
3627         u32 entry = *start;
3628         int i, ret = 0;
3629
3630         if (!new_skb) {
3631                 ret = -1;
3632         } else {
3633                 /* New SKB is guaranteed to be linear. */
3634                 entry = *start;
3635                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3636                                           PCI_DMA_TODEVICE);
3637                 /* Make sure new skb does not cross any 4G boundaries.
3638                  * Drop the packet if it does.
3639                  */
3640                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3641                         ret = -1;
3642                         dev_kfree_skb(new_skb);
3643                         new_skb = NULL;
3644                 } else {
3645                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3646                                     base_flags, 1 | (mss << 1));
3647                         *start = NEXT_TX(entry);
3648                 }
3649         }
3650
3651         /* Now clean up the sw ring entries. */
3652         i = 0;
3653         while (entry != last_plus_one) {
3654                 int len;
3655
3656                 if (i == 0)
3657                         len = skb_headlen(skb);
3658                 else
3659                         len = skb_shinfo(skb)->frags[i-1].size;
3660                 pci_unmap_single(tp->pdev,
3661                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3662                                  len, PCI_DMA_TODEVICE);
3663                 if (i == 0) {
3664                         tp->tx_buffers[entry].skb = new_skb;
3665                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3666                 } else {
3667                         tp->tx_buffers[entry].skb = NULL;
3668                 }
3669                 entry = NEXT_TX(entry);
3670                 i++;
3671         }
3672
3673         dev_kfree_skb(skb);
3674
3675         return ret;
3676 }
3677
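/* mss_and_is_end packs the END flag in bit 0 and the MSS above it;
 * callers build it as, e.g., (i == last) | (mss << 1).  The MSS ends up
 * in the descriptor's vlan_tag word via TXD_MSS_SHIFT.
 */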
3678 static void tg3_set_txd(struct tg3 *tp, int entry,
3679                         dma_addr_t mapping, int len, u32 flags,
3680                         u32 mss_and_is_end)
3681 {
3682         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3683         int is_end = (mss_and_is_end & 0x1);
3684         u32 mss = (mss_and_is_end >> 1);
3685         u32 vlan_tag = 0;
3686
3687         if (is_end)
3688                 flags |= TXD_FLAG_END;
3689         if (flags & TXD_FLAG_VLAN) {
3690                 vlan_tag = flags >> 16;
3691                 flags &= 0xffff;
3692         }
3693         vlan_tag |= (mss << TXD_MSS_SHIFT);
3694
3695         txd->addr_hi = ((u64) mapping >> 32);
3696         txd->addr_lo = ((u64) mapping & 0xffffffff);
3697         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3698         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3699 }
3700
3701 /* hard_start_xmit for devices that don't have any bugs and
3702  * support TG3_FLG2_HW_TSO_2 only.
3703  */
3704 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3705 {
3706         struct tg3 *tp = netdev_priv(dev);
3707         dma_addr_t mapping;
3708         u32 len, entry, base_flags, mss;
3709
3710         len = skb_headlen(skb);
3711
3712         /* No BH disabling for tx_lock here.  We are running in BH disabled
3713          * context and TX reclaim runs via tp->poll inside of a software
3714          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3715          * no IRQ context deadlocks to worry about either.  Rejoice!
3716          */
3717         if (!spin_trylock(&tp->tx_lock))
3718                 return NETDEV_TX_LOCKED;
3719
3720         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3721                 if (!netif_queue_stopped(dev)) {
3722                         netif_stop_queue(dev);
3723
3724                         /* This is a hard error, log it. */
3725                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3726                                "queue awake!\n", dev->name);
3727                 }
3728                 spin_unlock(&tp->tx_lock);
3729                 return NETDEV_TX_BUSY;
3730         }
3731
3732         entry = tp->tx_prod;
3733         base_flags = 0;
3734 #if TG3_TSO_SUPPORT != 0
3735         mss = 0;
3736         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3737             (mss = skb_shinfo(skb)->tso_size) != 0) {
3738                 int tcp_opt_len, ip_tcp_len;
3739
3740                 if (skb_header_cloned(skb) &&
3741                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3742                         dev_kfree_skb(skb);
3743                         goto out_unlock;
3744                 }
3745
3746                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3747                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3748
3749                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3750                                TXD_FLAG_CPU_POST_DMA);
3751
3752                 skb->nh.iph->check = 0;
3753                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3754
3755                 skb->h.th->check = 0;
3756
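                /* Fold the IP + TCP header length into the upper bits of
                 * the mss value handed to the hardware TSO engine;
                 * tg3_set_txd() then shifts mss into the descriptor.
                 */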
3757                 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3758         }
3759         else if (skb->ip_summed == CHECKSUM_HW)
3760                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3761 #else
3762         mss = 0;
3763         if (skb->ip_summed == CHECKSUM_HW)
3764                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3765 #endif
3766 #if TG3_VLAN_TAG_USED
3767         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3768                 base_flags |= (TXD_FLAG_VLAN |
3769                                (vlan_tx_tag_get(skb) << 16));
3770 #endif
3771
3772         /* Queue skb data, a.k.a. the main skb fragment. */
3773         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3774
3775         tp->tx_buffers[entry].skb = skb;
3776         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3777
3778         tg3_set_txd(tp, entry, mapping, len, base_flags,
3779                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3780
3781         entry = NEXT_TX(entry);
3782
3783         /* Now loop through additional data fragments, and queue them. */
3784         if (skb_shinfo(skb)->nr_frags > 0) {
3785                 unsigned int i, last;
3786
3787                 last = skb_shinfo(skb)->nr_frags - 1;
3788                 for (i = 0; i <= last; i++) {
3789                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3790
3791                         len = frag->size;
3792                         mapping = pci_map_page(tp->pdev,
3793                                                frag->page,
3794                                                frag->page_offset,
3795                                                len, PCI_DMA_TODEVICE);
3796
3797                         tp->tx_buffers[entry].skb = NULL;
3798                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3799
3800                         tg3_set_txd(tp, entry, mapping, len,
3801                                     base_flags, (i == last) | (mss << 1));
3802
3803                         entry = NEXT_TX(entry);
3804                 }
3805         }
3806
3807         /* Packets are ready, update Tx producer idx local and on card. */
3808         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3809
3810         tp->tx_prod = entry;
3811         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3812                 netif_stop_queue(dev);
3813                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3814                         netif_wake_queue(tp->dev);
3815         }
3816
3817 out_unlock:
3818         mmiowb();
3819         spin_unlock(&tp->tx_lock);
3820
3821         dev->trans_start = jiffies;
3822
3823         return NETDEV_TX_OK;
3824 }
3825
3826 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3827  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3828  */
3829 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3830 {
3831         struct tg3 *tp = netdev_priv(dev);
3832         dma_addr_t mapping;
3833         u32 len, entry, base_flags, mss;
3834         int would_hit_hwbug;
3835
3836         len = skb_headlen(skb);
3837
3838         /* No BH disabling for tx_lock here.  We are running in BH disabled
3839          * context and TX reclaim runs via tp->poll inside of a software
3840          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3841          * no IRQ context deadlocks to worry about either.  Rejoice!
3842          */
3843         if (!spin_trylock(&tp->tx_lock))
3844                 return NETDEV_TX_LOCKED; 
3845
3846         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3847                 if (!netif_queue_stopped(dev)) {
3848                         netif_stop_queue(dev);
3849
3850                         /* This is a hard error, log it. */
3851                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3852                                "queue awake!\n", dev->name);
3853                 }
3854                 spin_unlock(&tp->tx_lock);
3855                 return NETDEV_TX_BUSY;
3856         }
3857
3858         entry = tp->tx_prod;
3859         base_flags = 0;
3860         if (skb->ip_summed == CHECKSUM_HW)
3861                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3862 #if TG3_TSO_SUPPORT != 0
3863         mss = 0;
3864         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3865             (mss = skb_shinfo(skb)->tso_size) != 0) {
3866                 int tcp_opt_len, ip_tcp_len;
3867
3868                 if (skb_header_cloned(skb) &&
3869                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3870                         dev_kfree_skb(skb);
3871                         goto out_unlock;
3872                 }
3873
3874                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3875                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3876
3877                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3878                                TXD_FLAG_CPU_POST_DMA);
3879
3880                 skb->nh.iph->check = 0;
3881                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3882                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3883                         skb->h.th->check = 0;
3884                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3885                 }
3886                 else {
3887                         skb->h.th->check =
3888                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3889                                                    skb->nh.iph->daddr,
3890                                                    0, IPPROTO_TCP, 0);
3891                 }
3892
3893                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3894                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3895                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3896                                 int tsflags;
3897
3898                                 tsflags = ((skb->nh.iph->ihl - 5) +
3899                                            (tcp_opt_len >> 2));
3900                                 mss |= (tsflags << 11);
3901                         }
3902                 } else {
3903                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3904                                 int tsflags;
3905
3906                                 tsflags = ((skb->nh.iph->ihl - 5) +
3907                                            (tcp_opt_len >> 2));
3908                                 base_flags |= tsflags << 12;
3909                         }
3910                 }
3911         }
3912 #else
3913         mss = 0;
3914 #endif
3915 #if TG3_VLAN_TAG_USED
3916         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3917                 base_flags |= (TXD_FLAG_VLAN |
3918                                (vlan_tx_tag_get(skb) << 16));
3919 #endif
3920
3921         /* Queue skb data, a.k.a. the main skb fragment. */
3922         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3923
3924         tp->tx_buffers[entry].skb = skb;
3925         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3926
3927         would_hit_hwbug = 0;
3928
3929         if (tg3_4g_overflow_test(mapping, len))
3930                 would_hit_hwbug = 1;
3931
3932         tg3_set_txd(tp, entry, mapping, len, base_flags,
3933                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3934
3935         entry = NEXT_TX(entry);
3936
3937         /* Now loop through additional data fragments, and queue them. */
3938         if (skb_shinfo(skb)->nr_frags > 0) {
3939                 unsigned int i, last;
3940
3941                 last = skb_shinfo(skb)->nr_frags - 1;
3942                 for (i = 0; i <= last; i++) {
3943                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3944
3945                         len = frag->size;
3946                         mapping = pci_map_page(tp->pdev,
3947                                                frag->page,
3948                                                frag->page_offset,
3949                                                len, PCI_DMA_TODEVICE);
3950
3951                         tp->tx_buffers[entry].skb = NULL;
3952                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3953
3954                         if (tg3_4g_overflow_test(mapping, len))
3955                                 would_hit_hwbug = 1;
3956
3957                         if (tg3_40bit_overflow_test(tp, mapping, len))
3958                                 would_hit_hwbug = 1;
3959
3960                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3961                                 tg3_set_txd(tp, entry, mapping, len,
3962                                             base_flags, (i == last)|(mss << 1));
3963                         else
3964                                 tg3_set_txd(tp, entry, mapping, len,
3965                                             base_flags, (i == last));
3966
3967                         entry = NEXT_TX(entry);
3968                 }
3969         }
3970
3971         if (would_hit_hwbug) {
3972                 u32 last_plus_one = entry;
3973                 u32 start;
3974
3975                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3976                 start &= (TG3_TX_RING_SIZE - 1);
3977
3978                 /* If the workaround fails due to memory/mapping
3979                  * failure, silently drop this packet.
3980                  */
3981                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3982                                                 &start, base_flags, mss))
3983                         goto out_unlock;
3984
3985                 entry = start;
3986         }
3987
3988         /* Packets are ready, update Tx producer idx local and on card. */
3989         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3990
3991         tp->tx_prod = entry;
3992         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3993                 netif_stop_queue(dev);
3994                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3995                         netif_wake_queue(tp->dev);
3996         }
3997
3998 out_unlock:
3999         mmiowb();
4000         spin_unlock(&tp->tx_lock);
4001
4002         dev->trans_start = jiffies;
4003
4004         return NETDEV_TX_OK;
4005 }
4006
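/* Adjust RX buffering for the new MTU.  Chips outside the 5780 class
 * have a dedicated jumbo ring (JUMBO_RING_ENABLE); 5780-class chips
 * instead reuse the standard ring with jumbo-sized buffers (see
 * tg3_init_rings()), and TSO is switched off for them at jumbo MTUs.
 */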
4007 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4008                                int new_mtu)
4009 {
4010         dev->mtu = new_mtu;
4011
4012         if (new_mtu > ETH_DATA_LEN) {
4013                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4014                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4015                         ethtool_op_set_tso(dev, 0);
4016                 }
4017                 else
4018                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4019         } else {
4020                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4021                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4022                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4023         }
4024 }
4025
4026 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4027 {
4028         struct tg3 *tp = netdev_priv(dev);
4029
4030         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4031                 return -EINVAL;
4032
4033         if (!netif_running(dev)) {
4034                 /* We'll just catch it later when the
4035                  * device is brought up.
4036                  */
4037                 tg3_set_mtu(dev, tp, new_mtu);
4038                 return 0;
4039         }
4040
4041         tg3_netif_stop(tp);
4042
4043         tg3_full_lock(tp, 1);
4044
4045         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4046
4047         tg3_set_mtu(dev, tp, new_mtu);
4048
4049         tg3_init_hw(tp);
4050
4051         tg3_netif_start(tp);
4052
4053         tg3_full_unlock(tp);
4054
4055         return 0;
4056 }
4057
4058 /* Free up pending packets in all rx/tx rings.
4059  *
4060  * The chip has been shut down and the driver detached from
4061  * the networking stack, so no interrupts or new tx packets will
4062  * end up in the driver.  tp->{tx,}lock is not held and we are not
4063  * in an interrupt context and thus may sleep.
4064  */
4065 static void tg3_free_rings(struct tg3 *tp)
4066 {
4067         struct ring_info *rxp;
4068         int i;
4069
4070         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4071                 rxp = &tp->rx_std_buffers[i];
4072
4073                 if (rxp->skb == NULL)
4074                         continue;
4075                 pci_unmap_single(tp->pdev,
4076                                  pci_unmap_addr(rxp, mapping),
4077                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4078                                  PCI_DMA_FROMDEVICE);
4079                 dev_kfree_skb_any(rxp->skb);
4080                 rxp->skb = NULL;
4081         }
4082
4083         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4084                 rxp = &tp->rx_jumbo_buffers[i];
4085
4086                 if (rxp->skb == NULL)
4087                         continue;
4088                 pci_unmap_single(tp->pdev,
4089                                  pci_unmap_addr(rxp, mapping),
4090                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4091                                  PCI_DMA_FROMDEVICE);
4092                 dev_kfree_skb_any(rxp->skb);
4093                 rxp->skb = NULL;
4094         }
4095
4096         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4097                 struct tx_ring_info *txp;
4098                 struct sk_buff *skb;
4099                 int j;
4100
4101                 txp = &tp->tx_buffers[i];
4102                 skb = txp->skb;
4103
4104                 if (skb == NULL) {
4105                         i++;
4106                         continue;
4107                 }
4108
4109                 pci_unmap_single(tp->pdev,
4110                                  pci_unmap_addr(txp, mapping),
4111                                  skb_headlen(skb),
4112                                  PCI_DMA_TODEVICE);
4113                 txp->skb = NULL;
4114
4115                 i++;
4116
4117                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4118                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4119                         pci_unmap_page(tp->pdev,
4120                                        pci_unmap_addr(txp, mapping),
4121                                        skb_shinfo(skb)->frags[j].size,
4122                                        PCI_DMA_TODEVICE);
4123                         i++;
4124                 }
4125
4126                 dev_kfree_skb_any(skb);
4127         }
4128 }
4129
4130 /* Initialize tx/rx rings for packet processing.
4131  *
4132  * The chip has been shut down and the driver detached from
4133  * the networking, so no interrupts or new tx packets will
4134  * end up in the driver.  tp->{tx,}lock are held and thus
4135  * we may not sleep.
4136  */
4137 static void tg3_init_rings(struct tg3 *tp)
4138 {
4139         u32 i;
4140
4141         /* Free up all the SKBs. */
4142         tg3_free_rings(tp);
4143
4144         /* Zero out all descriptors. */
4145         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4146         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4147         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4148         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4149
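        /* 5780-class chips do not use the separate jumbo ring; when the
         * MTU calls for jumbo frames they post jumbo-sized buffers to
         * the standard ring instead (see tg3_set_mtu()).
         */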
4150         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4151         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4152             (tp->dev->mtu > ETH_DATA_LEN))
4153                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4154
4155         /* Initialize invariants of the rings; we only set this
4156          * stuff once.  This works because the card does not
4157          * write into the rx buffer posting rings.
4158          */
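        /* The opaque field records which ring the buffer belongs to and
         * its index within that ring, so the rx completion path can
         * find the matching ring_info entry later.
         */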
4159         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4160                 struct tg3_rx_buffer_desc *rxd;
4161
4162                 rxd = &tp->rx_std[i];
4163                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4164                         << RXD_LEN_SHIFT;
4165                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4166                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4167                                (i << RXD_OPAQUE_INDEX_SHIFT));
4168         }
4169
4170         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4171                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4172                         struct tg3_rx_buffer_desc *rxd;
4173
4174                         rxd = &tp->rx_jumbo[i];
4175                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4176                                 << RXD_LEN_SHIFT;
4177                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4178                                 RXD_FLAG_JUMBO;
4179                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4180                                (i << RXD_OPAQUE_INDEX_SHIFT));
4181                 }
4182         }
4183
4184         /* Now allocate fresh SKBs for each rx ring. */
4185         for (i = 0; i < tp->rx_pending; i++) {
4186                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4187                                      -1, i) < 0)
4188                         break;
4189         }
4190
4191         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4192                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4193                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4194                                              -1, i) < 0)
4195                                 break;
4196                 }
4197         }
4198 }
4199
4200 /*
4201  * Must not be invoked with interrupt sources disabled and
4202  * the hardware shut down.
4203  */
4204 static void tg3_free_consistent(struct tg3 *tp)
4205 {
4206         kfree(tp->rx_std_buffers);
4207         tp->rx_std_buffers = NULL;
4208         if (tp->rx_std) {
4209                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4210                                     tp->rx_std, tp->rx_std_mapping);
4211                 tp->rx_std = NULL;
4212         }
4213         if (tp->rx_jumbo) {
4214                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4215                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4216                 tp->rx_jumbo = NULL;
4217         }
4218         if (tp->rx_rcb) {
4219                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4220                                     tp->rx_rcb, tp->rx_rcb_mapping);
4221                 tp->rx_rcb = NULL;
4222         }
4223         if (tp->tx_ring) {
4224                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4225                         tp->tx_ring, tp->tx_desc_mapping);
4226                 tp->tx_ring = NULL;
4227         }
4228         if (tp->hw_status) {
4229                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4230                                     tp->hw_status, tp->status_mapping);
4231                 tp->hw_status = NULL;
4232         }
4233         if (tp->hw_stats) {
4234                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4235                                     tp->hw_stats, tp->stats_mapping);
4236                 tp->hw_stats = NULL;
4237         }
4238 }
4239
4240 /*
4241  * Must not be invoked with interrupt sources disabled and
4242  * the hardware shut down.  Can sleep.
4243  */
4244 static int tg3_alloc_consistent(struct tg3 *tp)
4245 {
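        /* The std rx, jumbo rx and tx ring_info arrays are carved out of
         * a single kmalloc()ed block; rx_jumbo_buffers and tx_buffers
         * below are simply pointers into it.
         */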
4246         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4247                                       (TG3_RX_RING_SIZE +
4248                                        TG3_RX_JUMBO_RING_SIZE)) +
4249                                      (sizeof(struct tx_ring_info) *
4250                                       TG3_TX_RING_SIZE),
4251                                      GFP_KERNEL);
4252         if (!tp->rx_std_buffers)
4253                 return -ENOMEM;
4254
4255         memset(tp->rx_std_buffers, 0,
4256                (sizeof(struct ring_info) *
4257                 (TG3_RX_RING_SIZE +
4258                  TG3_RX_JUMBO_RING_SIZE)) +
4259                (sizeof(struct tx_ring_info) *
4260                 TG3_TX_RING_SIZE));
4261
4262         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4263         tp->tx_buffers = (struct tx_ring_info *)
4264                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4265
4266         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4267                                           &tp->rx_std_mapping);
4268         if (!tp->rx_std)
4269                 goto err_out;
4270
4271         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4272                                             &tp->rx_jumbo_mapping);
4273
4274         if (!tp->rx_jumbo)
4275                 goto err_out;
4276
4277         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4278                                           &tp->rx_rcb_mapping);
4279         if (!tp->rx_rcb)
4280                 goto err_out;
4281
4282         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4283                                            &tp->tx_desc_mapping);
4284         if (!tp->tx_ring)
4285                 goto err_out;
4286
4287         tp->hw_status = pci_alloc_consistent(tp->pdev,
4288                                              TG3_HW_STATUS_SIZE,
4289                                              &tp->status_mapping);
4290         if (!tp->hw_status)
4291                 goto err_out;
4292
4293         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4294                                             sizeof(struct tg3_hw_stats),
4295                                             &tp->stats_mapping);
4296         if (!tp->hw_stats)
4297                 goto err_out;
4298
4299         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4300         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4301
4302         return 0;
4303
4304 err_out:
4305         tg3_free_consistent(tp);
4306         return -ENOMEM;
4307 }
4308
4309 #define MAX_WAIT_CNT 1000
4310
4311 /* To stop a block, clear the enable bit and poll till it
4312  * clears.  tp->lock is held.
4313  */
4314 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4315 {
4316         unsigned int i;
4317         u32 val;
4318
4319         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4320                 switch (ofs) {
4321                 case RCVLSC_MODE:
4322                 case DMAC_MODE:
4323                 case MBFREE_MODE:
4324                 case BUFMGR_MODE:
4325                 case MEMARB_MODE:
4326                         /* We can't enable/disable these bits on the
4327                          * 5705/5750, so just report success.
4328                          */
4329                         return 0;
4330
4331                 default:
4332                         break;
4333                 }
4334         }
4335
4336         val = tr32(ofs);
4337         val &= ~enable_bit;
4338         tw32_f(ofs, val);
4339
4340         for (i = 0; i < MAX_WAIT_CNT; i++) {
4341                 udelay(100);
4342                 val = tr32(ofs);
4343                 if ((val & enable_bit) == 0)
4344                         break;
4345         }
4346
4347         if (i == MAX_WAIT_CNT && !silent) {
4348                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4349                        "ofs=%lx enable_bit=%x\n",
4350                        ofs, enable_bit);
4351                 return -ENODEV;
4352         }
4353
4354         return 0;
4355 }
4356
4357 /* tp->lock is held. */
4358 static int tg3_abort_hw(struct tg3 *tp, int silent)
4359 {
4360         int i, err;
4361
4362         tg3_disable_ints(tp);
4363
4364         tp->rx_mode &= ~RX_MODE_ENABLE;
4365         tw32_f(MAC_RX_MODE, tp->rx_mode);
4366         udelay(10);
4367
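        /* Quiesce the receive blocks first, then the send and DMA
         * blocks, and finally (further below) host coalescing, the
         * buffer manager and the memory arbiter.
         */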
4368         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4369         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4370         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4371         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4372         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4373         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4374
4375         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4376         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4377         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4378         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4379         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4380         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4381         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4382
4383         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4384         tw32_f(MAC_MODE, tp->mac_mode);
4385         udelay(40);
4386
4387         tp->tx_mode &= ~TX_MODE_ENABLE;
4388         tw32_f(MAC_TX_MODE, tp->tx_mode);
4389
4390         for (i = 0; i < MAX_WAIT_CNT; i++) {
4391                 udelay(100);
4392                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4393                         break;
4394         }
4395         if (i >= MAX_WAIT_CNT) {
4396                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4397                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4398                        tp->dev->name, tr32(MAC_TX_MODE));
4399                 err |= -ENODEV;
4400         }
4401
4402         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4403         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4404         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4405
4406         tw32(FTQ_RESET, 0xffffffff);
4407         tw32(FTQ_RESET, 0x00000000);
4408
4409         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4410         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4411
4412         if (tp->hw_status)
4413                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4414         if (tp->hw_stats)
4415                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4416
4417         return err;
4418 }
4419
4420 /* tp->lock is held. */
4421 static int tg3_nvram_lock(struct tg3 *tp)
4422 {
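        /* NVRAM_SWARB implements a request/grant semaphore, presumably
         * arbitrating NVRAM access with the bootcode; nvram_lock_cnt
         * lets the driver nest lock requests without re-arbitrating.
         */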
4423         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4424                 int i;
4425
4426                 if (tp->nvram_lock_cnt == 0) {
4427                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4428                         for (i = 0; i < 8000; i++) {
4429                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4430                                         break;
4431                                 udelay(20);
4432                         }
4433                         if (i == 8000) {
4434                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4435                                 return -ENODEV;
4436                         }
4437                 }
4438                 tp->nvram_lock_cnt++;
4439         }
4440         return 0;
4441 }
4442
4443 /* tp->lock is held. */
4444 static void tg3_nvram_unlock(struct tg3 *tp)
4445 {
4446         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4447                 if (tp->nvram_lock_cnt > 0)
4448                         tp->nvram_lock_cnt--;
4449                 if (tp->nvram_lock_cnt == 0)
4450                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4451         }
4452 }
4453
4454 /* tp->lock is held. */
4455 static void tg3_enable_nvram_access(struct tg3 *tp)
4456 {
4457         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4458             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4459                 u32 nvaccess = tr32(NVRAM_ACCESS);
4460
4461                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4462         }
4463 }
4464
4465 /* tp->lock is held. */
4466 static void tg3_disable_nvram_access(struct tg3 *tp)
4467 {
4468         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4469             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4470                 u32 nvaccess = tr32(NVRAM_ACCESS);
4471
4472                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4473         }
4474 }
4475
4476 /* tp->lock is held. */
4477 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4478 {
4479         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4480                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4481                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4482
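        /* With the newer ASF handshake the driver advertises its state
         * (start/unload/suspend) to the management firmware through a
         * mailbox in NIC SRAM before the reset is issued.
         */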
4483         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4484                 switch (kind) {
4485                 case RESET_KIND_INIT:
4486                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4487                                       DRV_STATE_START);
4488                         break;
4489
4490                 case RESET_KIND_SHUTDOWN:
4491                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4492                                       DRV_STATE_UNLOAD);
4493                         break;
4494
4495                 case RESET_KIND_SUSPEND:
4496                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4497                                       DRV_STATE_SUSPEND);
4498                         break;
4499
4500                 default:
4501                         break;
4502                 }
4503         }
4504 }
4505
4506 /* tp->lock is held. */
4507 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4508 {
4509         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4510                 switch (kind) {
4511                 case RESET_KIND_INIT:
4512                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4513                                       DRV_STATE_START_DONE);
4514                         break;
4515
4516                 case RESET_KIND_SHUTDOWN:
4517                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4518                                       DRV_STATE_UNLOAD_DONE);
4519                         break;
4520
4521                 default:
4522                         break;
4523                 }
4524         }
4525 }
4526
4527 /* tp->lock is held. */
4528 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4529 {
4530         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4531                 switch (kind) {
4532                 case RESET_KIND_INIT:
4533                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4534                                       DRV_STATE_START);
4535                         break;
4536
4537                 case RESET_KIND_SHUTDOWN:
4538                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4539                                       DRV_STATE_UNLOAD);
4540                         break;
4541
4542                 case RESET_KIND_SUSPEND:
4543                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4544                                       DRV_STATE_SUSPEND);
4545                         break;
4546
4547                 default:
4548                         break;
4549                 }
4550         }
4551 }
4552
4553 static void tg3_stop_fw(struct tg3 *);
4554
4555 /* tp->lock is held. */
4556 static int tg3_chip_reset(struct tg3 *tp)
4557 {
4558         u32 val;
4559         void (*write_op)(struct tg3 *, u32, u32);
4560         int i;
4561
4562         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4563                 tg3_nvram_lock(tp);
4564                 /* No matching tg3_nvram_unlock() after this because
4565                  * chip reset below will undo the nvram lock.
4566                  */
4567                 tp->nvram_lock_cnt = 0;
4568         }
4569
4570         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4571             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4572             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4573                 tw32(GRC_FASTBOOT_PC, 0);
4574
4575         /*
4576          * We must avoid the readl() that normally takes place.
4577          * It locks up machines, causes machine checks, and triggers
4578          * other fun things.  So, temporarily disable the 5701
4579          * hardware workaround while we do the reset.
4580          */
4581         write_op = tp->write32;
4582         if (write_op == tg3_write_flush_reg32)
4583                 tp->write32 = tg3_write32;
4584
4585         /* do the reset */
4586         val = GRC_MISC_CFG_CORECLK_RESET;
4587
4588         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4589                 if (tr32(0x7e2c) == 0x60) {
4590                         tw32(0x7e2c, 0x20);
4591                 }
4592                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4593                         tw32(GRC_MISC_CFG, (1 << 29));
4594                         val |= (1 << 29);
4595                 }
4596         }
4597
4598         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4599                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4600         tw32(GRC_MISC_CFG, val);
4601
4602         /* restore 5701 hardware bug workaround write method */
4603         tp->write32 = write_op;
4604
4605         /* Unfortunately, we have to delay before the PCI read back.
4606          * Some 575X chips will not even respond to a PCI cfg access
4607          * when the reset command is given to the chip.
4608          *
4609          * How do these hardware designers expect things to work
4610          * properly if the PCI write is posted for a long period
4611          * of time?  It is always necessary to have some method by
4612          * which a register read back can occur to push out the
4613          * write that does the reset.
4614          *
4615          * For most tg3 variants the trick below has been working.
4616          * Ho hum...
4617          */
4618         udelay(120);
4619
4620         /* Flush PCI posted writes.  The normal MMIO registers
4621          * are inaccessible at this time so this is the only
4622          * way to do this reliably (actually, this is no longer
4623          * the case, see above).  I tried to use indirect
4624          * register read/write but this upset some 5701 variants.
4625          */
4626         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4627
4628         udelay(120);
4629
4630         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4631                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4632                         int i;
4633                         u32 cfg_val;
4634
4635                         /* Wait for link training to complete.  */
4636                         for (i = 0; i < 5000; i++)
4637                                 udelay(100);
4638
4639                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4640                         pci_write_config_dword(tp->pdev, 0xc4,
4641                                                cfg_val | (1 << 15));
4642                 }
4643                 /* Set PCIE max payload size and clear error status.  */
4644                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4645         }
4646
4647         /* Re-enable indirect register accesses. */
4648         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4649                                tp->misc_host_ctrl);
4650
4651         /* Set MAX PCI retry to zero. */
4652         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4653         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4654             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4655                 val |= PCISTATE_RETRY_SAME_DMA;
4656         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4657
4658         pci_restore_state(tp->pdev);
4659
4660         /* Make sure PCI-X relaxed ordering bit is clear. */
4661         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4662         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4663         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4664
4665         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4666                 u32 val;
4667
4668                 /* Chip reset on 5780 will reset the MSI enable bit,
4669                  * so we need to restore it.
4670                  */
4671                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4672                         u16 ctrl;
4673
4674                         pci_read_config_word(tp->pdev,
4675                                              tp->msi_cap + PCI_MSI_FLAGS,
4676                                              &ctrl);
4677                         pci_write_config_word(tp->pdev,
4678                                               tp->msi_cap + PCI_MSI_FLAGS,
4679                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4680                         val = tr32(MSGINT_MODE);
4681                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4682                 }
4683
4684                 val = tr32(MEMARB_MODE);
4685                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4686
4687         } else
4688                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4689
4690         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4691                 tg3_stop_fw(tp);
4692                 tw32(0x5000, 0x400);
4693         }
4694
4695         tw32(GRC_MODE, tp->grc_mode);
4696
4697         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4698                 u32 val = tr32(0xc4);
4699
4700                 tw32(0xc4, val | (1 << 15));
4701         }
4702
4703         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4704             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4705                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4706                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4707                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4708                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4709         }
4710
4711         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4712                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4713                 tw32_f(MAC_MODE, tp->mac_mode);
4714         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4715                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4716                 tw32_f(MAC_MODE, tp->mac_mode);
4717         } else
4718                 tw32_f(MAC_MODE, 0);
4719         udelay(40);
4720
4721         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4722                 /* Wait for firmware initialization to complete. */
4723                 for (i = 0; i < 100000; i++) {
4724                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4725                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4726                                 break;
4727                         udelay(10);
4728                 }
4729                 if (i >= 100000) {
4730                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4731                                "firmware will not restart magic=%08x\n",
4732                                tp->dev->name, val);
4733                         return -ENODEV;
4734                 }
4735         }
4736
4737         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4738             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4739                 u32 val = tr32(0x7c00);
4740
4741                 tw32(0x7c00, val | (1 << 25));
4742         }
4743
4744         /* Reprobe ASF enable state.  */
4745         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4746         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4747         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4748         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4749                 u32 nic_cfg;
4750
4751                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4752                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4753                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4754                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4755                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4756                 }
4757         }
4758
4759         return 0;
4760 }
4761
4762 /* tp->lock is held. */
4763 static void tg3_stop_fw(struct tg3 *tp)
4764 {
4765         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4766                 u32 val;
4767                 int i;
4768
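                /* Ask the firmware to pause by posting the command in the
                 * mailbox and setting the RX CPU event bit; the firmware
                 * clears the bit to acknowledge.
                 */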
4769                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4770                 val = tr32(GRC_RX_CPU_EVENT);
4771                 val |= (1 << 14);
4772                 tw32(GRC_RX_CPU_EVENT, val);
4773
4774                 /* Wait for RX cpu to ACK the event.  */
4775                 for (i = 0; i < 100; i++) {
4776                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4777                                 break;
4778                         udelay(1);
4779                 }
4780         }
4781 }
4782
4783 /* tp->lock is held. */
4784 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4785 {
4786         int err;
4787
4788         tg3_stop_fw(tp);
4789
4790         tg3_write_sig_pre_reset(tp, kind);
4791
4792         tg3_abort_hw(tp, silent);
4793         err = tg3_chip_reset(tp);
4794
4795         tg3_write_sig_legacy(tp, kind);
4796         tg3_write_sig_post_reset(tp, kind);
4797
4798         if (err)
4799                 return err;
4800
4801         return 0;
4802 }
4803
4804 #define TG3_FW_RELEASE_MAJOR    0x0
4805 #define TG3_FW_RELASE_MINOR     0x0
4806 #define TG3_FW_RELEASE_FIX      0x0
4807 #define TG3_FW_START_ADDR       0x08000000
4808 #define TG3_FW_TEXT_ADDR        0x08000000
4809 #define TG3_FW_TEXT_LEN         0x9c0
4810 #define TG3_FW_RODATA_ADDR      0x080009c0
4811 #define TG3_FW_RODATA_LEN       0x60
4812 #define TG3_FW_DATA_ADDR        0x08000a40
4813 #define TG3_FW_DATA_LEN         0x20
4814 #define TG3_FW_SBSS_ADDR        0x08000a60
4815 #define TG3_FW_SBSS_LEN         0xc
4816 #define TG3_FW_BSS_ADDR         0x08000a70
4817 #define TG3_FW_BSS_LEN          0x10
4818
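/* Replacement firmware image (apparently MIPS machine code) for 5701 A0
 * parts, laid out according to the TG3_FW_* macros above and loaded into
 * the chip's CPU scratch memory by tg3_load_5701_a0_firmware_fix().
 */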
4819 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4820         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4821         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4822         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4823         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4824         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4825         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4826         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4827         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4828         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4829         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4830         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4831         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4832         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4833         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4834         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4835         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4836         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4837         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4838         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4839         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4840         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4841         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4842         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4843         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4844         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4845         0, 0, 0, 0, 0, 0,
4846         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4847         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4848         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4849         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4850         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4851         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4852         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4853         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4854         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4855         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4856         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4857         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4858         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4859         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4860         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4861         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4862         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4863         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4864         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4865         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4866         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4867         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4868         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4869         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4870         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4871         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4872         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4873         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4874         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4875         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4876         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4877         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4878         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4879         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4880         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4881         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4882         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4883         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4884         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4885         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4886         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4887         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4888         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4889         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4890         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4891         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4892         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4893         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4894         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4895         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4896         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4897         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4898         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4899         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4900         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4901         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4902         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4903         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4904         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4905         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4906         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4907         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4908         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4909         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4910         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4911 };
4912
4913 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4914         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4915         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4916         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4917         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4918         0x00000000
4919 };
4920
4921 #if 0 /* All zeros, don't eat up space with it. */
4922 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4923         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4924         0x00000000, 0x00000000, 0x00000000, 0x00000000
4925 };
4926 #endif
4927
4928 #define RX_CPU_SCRATCH_BASE     0x30000
4929 #define RX_CPU_SCRATCH_SIZE     0x04000
4930 #define TX_CPU_SCRATCH_BASE     0x34000
4931 #define TX_CPU_SCRATCH_SIZE     0x04000
4932
4933 /* tp->lock is held. */
4934 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4935 {
4936         int i;
4937
4938         if (offset == TX_CPU_BASE &&
4939             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4940                 BUG();
4941
4942         if (offset == RX_CPU_BASE) {
4943                 for (i = 0; i < 10000; i++) {
4944                         tw32(offset + CPU_STATE, 0xffffffff);
4945                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4946                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4947                                 break;
4948                 }
4949
4950                 tw32(offset + CPU_STATE, 0xffffffff);
4951                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4952                 udelay(10);
4953         } else {
4954                 for (i = 0; i < 10000; i++) {
4955                         tw32(offset + CPU_STATE, 0xffffffff);
4956                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4957                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4958                                 break;
4959                 }
4960         }
4961
4962         if (i >= 10000) {
4963                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4964                        "and %s CPU\n",
4965                        tp->dev->name,
4966                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4967                 return -ENODEV;
4968         }
4969
4970         /* Clear firmware's nvram arbitration. */
4971         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4972                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4973         return 0;
4974 }
4975
4976 struct fw_info {
4977         unsigned int text_base;
4978         unsigned int text_len;
4979         u32 *text_data;
4980         unsigned int rodata_base;
4981         unsigned int rodata_len;
4982         u32 *rodata_data;
4983         unsigned int data_base;
4984         unsigned int data_len;
4985         u32 *data_data;
4986 };
4987
4988 /* tp->lock is held. */
4989 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4990                                  int cpu_scratch_size, struct fw_info *info)
4991 {
4992         int err, lock_err, i;
4993         void (*write_op)(struct tg3 *, u32, u32);
4994
4995         if (cpu_base == TX_CPU_BASE &&
4996             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4997                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4998                        "TX cpu firmware on %s which is 5705.\n",
4999                        tp->dev->name);
5000                 return -EINVAL;
5001         }
5002
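        /* 5705 and later parts take the scratch writes through the NIC
         * SRAM memory window (tg3_write_mem); older parts use indirect
         * register writes instead.
         */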
5003         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5004                 write_op = tg3_write_mem;
5005         else
5006                 write_op = tg3_write_indirect_reg32;
5007
5008         /* It is possible that bootcode is still loading at this point.
5009          * Get the nvram lock before halting the cpu.
5010          */
5011         lock_err = tg3_nvram_lock(tp);
5012         err = tg3_halt_cpu(tp, cpu_base);
5013         if (!lock_err)
5014                 tg3_nvram_unlock(tp);
5015         if (err)
5016                 goto out;
5017
5018         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5019                 write_op(tp, cpu_scratch_base + i, 0);
5020         tw32(cpu_base + CPU_STATE, 0xffffffff);
5021         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
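        /* Copy each firmware section to its link-time offset (low 16
         * bits of the section base) within the CPU scratch area.  A
         * NULL data pointer means that section is all zeros.
         */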
5022         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5023                 write_op(tp, (cpu_scratch_base +
5024                               (info->text_base & 0xffff) +
5025                               (i * sizeof(u32))),
5026                          (info->text_data ?
5027                           info->text_data[i] : 0));
5028         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5029                 write_op(tp, (cpu_scratch_base +
5030                               (info->rodata_base & 0xffff) +
5031                               (i * sizeof(u32))),
5032                          (info->rodata_data ?
5033                           info->rodata_data[i] : 0));
5034         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5035                 write_op(tp, (cpu_scratch_base +
5036                               (info->data_base & 0xffff) +
5037                               (i * sizeof(u32))),
5038                          (info->data_data ?
5039                           info->data_data[i] : 0));
5040
5041         err = 0;
5042
5043 out:
5044         return err;
5045 }
5046
5047 /* tp->lock is held. */
5048 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5049 {
5050         struct fw_info info;
5051         int err, i;
5052
5053         info.text_base = TG3_FW_TEXT_ADDR;
5054         info.text_len = TG3_FW_TEXT_LEN;
5055         info.text_data = &tg3FwText[0];
5056         info.rodata_base = TG3_FW_RODATA_ADDR;
5057         info.rodata_len = TG3_FW_RODATA_LEN;
5058         info.rodata_data = &tg3FwRodata[0];
5059         info.data_base = TG3_FW_DATA_ADDR;
5060         info.data_len = TG3_FW_DATA_LEN;
5061         info.data_data = NULL;
5062
5063         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5064                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5065                                     &info);
5066         if (err)
5067                 return err;
5068
5069         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5070                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5071                                     &info);
5072         if (err)
5073                 return err;
5074
5075         /* Now start up only the RX cpu. */
5076         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5077         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5078
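        /* Write the entry point into the program counter; if it does not
         * stick, re-halt the CPU and retry a few times before giving up.
         */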
5079         for (i = 0; i < 5; i++) {
5080                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5081                         break;
5082                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5083                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5084                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5085                 udelay(1000);
5086         }
5087         if (i >= 5) {
5088                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5089                        "to set RX CPU PC, is %08x should be %08x\n",
5090                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5091                        TG3_FW_TEXT_ADDR);
5092                 return -ENODEV;
5093         }
5094         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5095         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5096
5097         return 0;
5098 }
5099
5100 #if TG3_TSO_SUPPORT != 0
5101
5102 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5103 #define TG3_TSO_FW_RELASE_MINOR         0x6
5104 #define TG3_TSO_FW_RELEASE_FIX          0x0
5105 #define TG3_TSO_FW_START_ADDR           0x08000000
5106 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5107 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5108 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5109 #define TG3_TSO_FW_RODATA_LEN           0x60
5110 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5111 #define TG3_TSO_FW_DATA_LEN             0x30
5112 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5113 #define TG3_TSO_FW_SBSS_LEN             0x2c
5114 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5115 #define TG3_TSO_FW_BSS_LEN              0x894
5116
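/* Firmware image used for firmware-assisted TSO on chips that need it,
 * laid out according to the TG3_TSO_FW_* macros above.
 */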
5117 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5118         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5119         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5120         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5121         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5122         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5123         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5124         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5125         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5126         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5127         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5128         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5129         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5130         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5131         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5132         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5133         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5134         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5135         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5136         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5137         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5138         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5139         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5140         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5141         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5142         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5143         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5144         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5145         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5146         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5147         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5148         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5149         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5150         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5151         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5152         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5153         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5154         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5155         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5156         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5157         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5158         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5159         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5160         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5161         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5162         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5163         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5164         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5165         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5166         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5167         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5168         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5169         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5170         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5171         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5172         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5173         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5174         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5175         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5176         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5177         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5178         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5179         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5180         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5181         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5182         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5183         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5184         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5185         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5186         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5187         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5188         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5189         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5190         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5191         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5192         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5193         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5194         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5195         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5196         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5197         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5198         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5199         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5200         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5201         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5202         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5203         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5204         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5205         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5206         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5207         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5208         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5209         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5210         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5211         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5212         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5213         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5214         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5215         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5216         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5217         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5218         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5219         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5220         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5221         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5222         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5223         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5224         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5225         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5226         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5227         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5228         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5229         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5230         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5231         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5232         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5233         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5234         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5235         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5236         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5237         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5238         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5239         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5240         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5241         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5242         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5243         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5244         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5245         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5246         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5247         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5248         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5249         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5250         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5251         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5252         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5253         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5254         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5255         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5256         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5257         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5258         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5259         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5260         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5261         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5262         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5263         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5264         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5265         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5266         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5267         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5268         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5269         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5270         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5271         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5272         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5273         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5274         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5275         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5276         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5277         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5278         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5279         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5280         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5281         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5282         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5283         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5284         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5285         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5286         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5287         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5288         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5289         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5290         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5291         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5292         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5293         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5294         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5295         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5296         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5297         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5298         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5299         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5300         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5301         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5302         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5303         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5304         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5305         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5306         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5307         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5308         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5309         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5310         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5311         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5312         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5313         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5314         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5315         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5316         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5317         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5318         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5319         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5320         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5321         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5322         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5323         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5324         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5325         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5326         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5327         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5328         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5329         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5330         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5331         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5332         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5333         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5334         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5335         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5336         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5337         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5338         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5339         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5340         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5341         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5342         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5343         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5344         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5345         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5346         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5347         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5348         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5349         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5350         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5351         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5352         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5353         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5354         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5355         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5356         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5357         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5358         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5359         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5360         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5361         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5362         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5363         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5364         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5365         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5366         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5367         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5368         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5369         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5370         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5371         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5372         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5373         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5374         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5375         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5376         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5377         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5378         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5379         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5380         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5381         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5382         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5383         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5384         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5385         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5386         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5387         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5388         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5389         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5390         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5391         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5392         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5393         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5394         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5395         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5396         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5397         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5398         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5399         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5400         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5401         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5402 };
5403
5404 static u32 tg3TsoFwRodata[] = {
5405         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5406         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5407         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5408         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5409         0x00000000,
5410 };
5411
5412 static u32 tg3TsoFwData[] = {
5413         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5414         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5415         0x00000000,
5416 };
5417
5418 /* 5705 needs a special version of the TSO firmware.  */
5419 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5420 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5421 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5422 #define TG3_TSO5_FW_START_ADDR          0x00010000
5423 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5424 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5425 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5426 #define TG3_TSO5_FW_RODATA_LEN          0x50
5427 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5428 #define TG3_TSO5_FW_DATA_LEN            0x20
5429 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5430 #define TG3_TSO5_FW_SBSS_LEN            0x28
5431 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5432 #define TG3_TSO5_FW_BSS_LEN             0x88
5433
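     /*
      * For reference: tg3_load_tso_firmware() below sizes the 5705 CPU
      * scratch area as TEXT + RODATA + DATA + SBSS + BSS from the values
      * above, i.e. 0xe90 + 0x50 + 0x20 + 0x28 + 0x88 = 0xfb0 bytes.
      */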
5434 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5435         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5436         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5437         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5438         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5439         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5440         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5441         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5442         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5443         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5444         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5445         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5446         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5447         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5448         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5449         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5450         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5451         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5452         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5453         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5454         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5455         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5456         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5457         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5458         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5459         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5460         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5461         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5462         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5463         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5464         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5465         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5466         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5467         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5468         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5469         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5470         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5471         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5472         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5473         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5474         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5475         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5476         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5477         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5478         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5479         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5480         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5481         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5482         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5483         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5484         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5485         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5486         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5487         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5488         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5489         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5490         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5491         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5492         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5493         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5494         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5495         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5496         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5497         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5498         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5499         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5500         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5501         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5502         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5503         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5504         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5505         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5506         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5507         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5508         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5509         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5510         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5511         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5512         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5513         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5514         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5515         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5516         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5517         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5518         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5519         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5520         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5521         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5522         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5523         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5524         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5525         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5526         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5527         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5528         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5529         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5530         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5531         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5532         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5533         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5534         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5535         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5536         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5537         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5538         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5539         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5540         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5541         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5542         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5543         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5544         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5545         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5546         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5547         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5548         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5549         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5550         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5551         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5552         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5553         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5554         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5555         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5556         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5557         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5558         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5559         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5560         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5561         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5562         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5563         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5564         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5565         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5566         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5567         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5568         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5569         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5570         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5571         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5572         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5573         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5574         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5575         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5576         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5577         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5578         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5579         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5580         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5581         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5582         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5583         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5584         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5585         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5586         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5587         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5588         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5589         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5590         0x00000000, 0x00000000, 0x00000000,
5591 };
5592
5593 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5594         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5595         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5596         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5597         0x00000000, 0x00000000, 0x00000000,
5598 };
5599
5600 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5601         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5602         0x00000000, 0x00000000, 0x00000000,
5603 };
5604
5605 /* tp->lock is held. */
5606 static int tg3_load_tso_firmware(struct tg3 *tp)
5607 {
5608         struct fw_info info;
5609         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5610         int err, i;
5611
5612         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5613                 return 0;
5614
5615         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5616                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5617                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5618                 info.text_data = &tg3Tso5FwText[0];
5619                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5620                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5621                 info.rodata_data = &tg3Tso5FwRodata[0];
5622                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5623                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5624                 info.data_data = &tg3Tso5FwData[0];
5625                 cpu_base = RX_CPU_BASE;
5626                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5627                 cpu_scratch_size = (info.text_len +
5628                                     info.rodata_len +
5629                                     info.data_len +
5630                                     TG3_TSO5_FW_SBSS_LEN +
5631                                     TG3_TSO5_FW_BSS_LEN);
5632         } else {
5633                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5634                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5635                 info.text_data = &tg3TsoFwText[0];
5636                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5637                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5638                 info.rodata_data = &tg3TsoFwRodata[0];
5639                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5640                 info.data_len = TG3_TSO_FW_DATA_LEN;
5641                 info.data_data = &tg3TsoFwData[0];
5642                 cpu_base = TX_CPU_BASE;
5643                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5644                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5645         }
5646
5647         err = tg3_load_firmware_cpu(tp, cpu_base,
5648                                     cpu_scratch_base, cpu_scratch_size,
5649                                     &info);
5650         if (err)
5651                 return err;
5652
5653         /* Now start up the CPU. */
5654         tw32(cpu_base + CPU_STATE, 0xffffffff);
5655         tw32_f(cpu_base + CPU_PC,    info.text_base);
5656
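             /* Poll up to 5 times (~5 ms total): if the CPU has not latched the
              * new PC yet, halt it and rewrite the PC before checking again.
              */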
5657         for (i = 0; i < 5; i++) {
5658                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5659                         break;
5660                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5661                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5662                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5663                 udelay(1000);
5664         }
5665         if (i >= 5) {
5666                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5667                        "CPU PC for %s: is %08x, should be %08x\n",
5668                        tp->dev->name, tr32(cpu_base + CPU_PC),
5669                        info.text_base);
5670                 return -ENODEV;
5671         }
5672         tw32(cpu_base + CPU_STATE, 0xffffffff);
5673         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5674         return 0;
5675 }
5676
5677 #endif /* TG3_TSO_SUPPORT != 0 */
5678
5679 /* tp->lock is held. */
5680 static void __tg3_set_mac_addr(struct tg3 *tp)
5681 {
5682         u32 addr_high, addr_low;
5683         int i;
5684
5685         addr_high = ((tp->dev->dev_addr[0] << 8) |
5686                      tp->dev->dev_addr[1]);
5687         addr_low = ((tp->dev->dev_addr[2] << 24) |
5688                     (tp->dev->dev_addr[3] << 16) |
5689                     (tp->dev->dev_addr[4] <<  8) |
5690                     (tp->dev->dev_addr[5] <<  0));
5691         for (i = 0; i < 4; i++) {
5692                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5693                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5694         }
5695
5696         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5697             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5698                 for (i = 0; i < 12; i++) {
5699                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5700                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5701                 }
5702         }
5703
5704         addr_high = (tp->dev->dev_addr[0] +
5705                      tp->dev->dev_addr[1] +
5706                      tp->dev->dev_addr[2] +
5707                      tp->dev->dev_addr[3] +
5708                      tp->dev->dev_addr[4] +
5709                      tp->dev->dev_addr[5]) &
5710                 TX_BACKOFF_SEED_MASK;
5711         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5712 }
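     /*
      * Illustration with a made-up address: for dev_addr 00:10:18:aa:bb:cc the
      * code above writes
      *
      *     addr_high = 0x00000010   (bytes 0-1)
      *     addr_low  = 0x18aabbcc   (bytes 2-5)
      *
      * into four MAC address register pairs (and, on 5703/5704, into the
      * twelve extended pairs as well), then seeds the TX backoff generator
      * with (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc) & TX_BACKOFF_SEED_MASK.
      */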
5713
5714 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5715 {
5716         struct tg3 *tp = netdev_priv(dev);
5717         struct sockaddr *addr = p;
5718
5719         if (!is_valid_ether_addr(addr->sa_data))
5720                 return -EINVAL;
5721
5722         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5723
5724         if (!netif_running(dev))
5725                 return 0;
5726
5727         spin_lock_bh(&tp->lock);
5728         __tg3_set_mac_addr(tp);
5729         spin_unlock_bh(&tp->lock);
5730
5731         return 0;
5732 }
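     /*
      * tg3_set_mac_addr() is presumably wired up as the netdev set_mac_address
      * hook (e.g. reached via SIOCSIFHWADDR / dev_set_mac_address()).  The
      * hardware is only reprogrammed while the interface is running; otherwise
      * the new address is simply stored in dev->dev_addr and picked up by the
      * next tg3_reset_hw() via __tg3_set_mac_addr().
      */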
5733
5734 /* tp->lock is held. */
5735 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5736                            dma_addr_t mapping, u32 maxlen_flags,
5737                            u32 nic_addr)
5738 {
5739         tg3_write_mem(tp,
5740                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5741                       ((u64) mapping >> 32));
5742         tg3_write_mem(tp,
5743                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5744                       ((u64) mapping & 0xffffffff));
5745         tg3_write_mem(tp,
5746                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5747                        maxlen_flags);
5748
5749         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5750                 tg3_write_mem(tp,
5751                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5752                               nic_addr);
5753 }
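     /*
      * Example (taken from tg3_reset_hw() below): the send ring's control
      * block is programmed with
      *
      *     tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
      *                    tp->tx_desc_mapping,
      *                    (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
      *                    NIC_SRAM_TX_BUFFER_DESC);
      *
      * i.e. one TG3_BDINFO block gets the host DMA address of the ring, a
      * maxlen/flags word, and the ring's location in NIC SRAM.
      */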
5754
5755 static void __tg3_set_rx_mode(struct net_device *);
5756 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5757 {
5758         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5759         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5760         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5761         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5762         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5763                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5764                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5765         }
5766         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5767         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5768         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5769                 u32 val = ec->stats_block_coalesce_usecs;
5770
5771                 if (!netif_carrier_ok(tp->dev))
5772                         val = 0;
5773
5774                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5775         }
5776 }
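     /*
      * Roughly speaking, these registers are what the ethtool -C knobs end up
      * controlling: rx-usecs/tx-usecs -> HOSTCC_{RX,TX}COL_TICKS,
      * rx-frames/tx-frames -> HOSTCC_{RX,TX}MAX_FRAMES, the *-irq variants ->
      * the corresponding *COAL_*_INT registers, and stats-block-usecs ->
      * HOSTCC_STAT_COAL_TICKS (forced to 0 while the link is down).
      */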
5777
5778 /* tp->lock is held. */
5779 static int tg3_reset_hw(struct tg3 *tp)
5780 {
5781         u32 val, rdmac_mode;
5782         int i, err, limit;
5783
5784         tg3_disable_ints(tp);
5785
5786         tg3_stop_fw(tp);
5787
5788         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5789
5790         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5791                 tg3_abort_hw(tp, 1);
5792         }
5793
5794         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5795                 tg3_phy_reset(tp);
5796
5797         err = tg3_chip_reset(tp);
5798         if (err)
5799                 return err;
5800
5801         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5802
5803         /* This works around an issue with Athlon chipsets on
5804          * B3 tigon3 silicon.  This bit has no effect on any
5805          * other revision.  But do not set this on PCI Express
5806          * chips.
5807          */
5808         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5809                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5810         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5811
5812         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5813             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5814                 val = tr32(TG3PCI_PCISTATE);
5815                 val |= PCISTATE_RETRY_SAME_DMA;
5816                 tw32(TG3PCI_PCISTATE, val);
5817         }
5818
5819         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5820                 /* Enable some hw fixes.  */
5821                 val = tr32(TG3PCI_MSI_DATA);
5822                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5823                 tw32(TG3PCI_MSI_DATA, val);
5824         }
5825
5826         /* Descriptor ring init may make accesses to the
5827          * NIC SRAM area to setup the TX descriptors, so we
5828          * can only do this after the hardware has been
5829          * successfully reset.
5830          */
5831         tg3_init_rings(tp);
5832
5833         /* This value is determined during the probe time DMA
5834          * engine test, tg3_test_dma.
5835          */
5836         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5837
5838         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5839                           GRC_MODE_4X_NIC_SEND_RINGS |
5840                           GRC_MODE_NO_TX_PHDR_CSUM |
5841                           GRC_MODE_NO_RX_PHDR_CSUM);
5842         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5843         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5844                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5845         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5846                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5847
5848         tw32(GRC_MODE,
5849              tp->grc_mode |
5850              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5851
5852         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
5853         val = tr32(GRC_MISC_CFG);
5854         val &= ~0xff;
5855         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5856         tw32(GRC_MISC_CFG, val);
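             /* Assumption, not stated in the source: 65 == 66 - 1, so the
              * prescaler presumably divides the 66 MHz core clock down to a
              * ~1 MHz (1 us) timer tick.
              */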
5857
5858         /* Initialize MBUF/DESC pool. */
5859         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5860                 /* Do nothing.  */
5861         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5862                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5863                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5864                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5865                 else
5866                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5867                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5868                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5869         }
5870 #if TG3_TSO_SUPPORT != 0
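             /* The 5705 TSO firmware's CPU scratch area starts at
              * NIC_SRAM_MBUF_POOL_BASE5705 (see tg3_load_tso_firmware() above),
              * so move the mbuf pool base up past its footprint and shrink the
              * pool accordingly.
              */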
5871         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5872                 int fw_len;
5873
5874                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5875                           TG3_TSO5_FW_RODATA_LEN +
5876                           TG3_TSO5_FW_DATA_LEN +
5877                           TG3_TSO5_FW_SBSS_LEN +
5878                           TG3_TSO5_FW_BSS_LEN);
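                     /* Round the firmware footprint up to the next 0x80
                      * (128-byte) boundary before placing the mbuf pool
                      * immediately after it.
                      */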
5879                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5880                 tw32(BUFMGR_MB_POOL_ADDR,
5881                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5882                 tw32(BUFMGR_MB_POOL_SIZE,
5883                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5884         }
5885 #endif
5886
5887         if (tp->dev->mtu <= ETH_DATA_LEN) {
5888                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5889                      tp->bufmgr_config.mbuf_read_dma_low_water);
5890                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5891                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5892                 tw32(BUFMGR_MB_HIGH_WATER,
5893                      tp->bufmgr_config.mbuf_high_water);
5894         } else {
5895                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5896                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5897                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5898                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5899                 tw32(BUFMGR_MB_HIGH_WATER,
5900                      tp->bufmgr_config.mbuf_high_water_jumbo);
5901         }
5902         tw32(BUFMGR_DMA_LOW_WATER,
5903              tp->bufmgr_config.dma_low_water);
5904         tw32(BUFMGR_DMA_HIGH_WATER,
5905              tp->bufmgr_config.dma_high_water);
5906
5907         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5908         for (i = 0; i < 2000; i++) {
5909                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5910                         break;
5911                 udelay(10);
5912         }
5913         if (i >= 2000) {
5914                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5915                        tp->dev->name);
5916                 return -ENODEV;
5917         }
5918
5919         /* Setup replenish threshold. */
5920         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5921
5922         /* Initialize TG3_BDINFO's at:
5923          *  RCVDBDI_STD_BD:     standard eth size rx ring
5924          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5925          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5926          *
5927          * like so:
5928          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5929          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5930          *                              ring attribute flags
5931          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5932          *
5933          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5934          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5935          *
5936          * The size of each ring is fixed in the firmware, but the location is
5937          * configurable.
5938          */
5939         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5940              ((u64) tp->rx_std_mapping >> 32));
5941         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5942              ((u64) tp->rx_std_mapping & 0xffffffff));
5943         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5944              NIC_SRAM_RX_BUFFER_DESC);
5945
5946         /* Don't even try to program the JUMBO/MINI buffer descriptor
5947          * configs on 5705.
5948          */
5949         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5950                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5951                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5952         } else {
5953                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5954                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5955
5956                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5957                      BDINFO_FLAGS_DISABLED);
5958
5959                 /* Setup replenish threshold. */
5960                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5961
5962                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5963                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5964                              ((u64) tp->rx_jumbo_mapping >> 32));
5965                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5966                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5967                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5968                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5969                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5970                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5971                 } else {
5972                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5973                              BDINFO_FLAGS_DISABLED);
5974                 }
5975
5976         }
5977
5978         /* There is only one send ring on 5705/5750, no need to explicitly
5979          * disable the others.
5980          */
5981         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5982                 /* Clear out send RCB ring in SRAM. */
5983                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5984                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5985                                       BDINFO_FLAGS_DISABLED);
5986         }
5987
5988         tp->tx_prod = 0;
5989         tp->tx_cons = 0;
5990         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5991         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5992
5993         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5994                        tp->tx_desc_mapping,
5995                        (TG3_TX_RING_SIZE <<
5996                         BDINFO_FLAGS_MAXLEN_SHIFT),
5997                        NIC_SRAM_TX_BUFFER_DESC);
5998
5999         /* There is only one receive return ring on 5705/5750, no need
6000          * to explicitly disable the others.
6001          */
6002         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6003                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6004                      i += TG3_BDINFO_SIZE) {
6005                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6006                                       BDINFO_FLAGS_DISABLED);
6007                 }
6008         }
6009
6010         tp->rx_rcb_ptr = 0;
6011         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6012
6013         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6014                        tp->rx_rcb_mapping,
6015                        (TG3_RX_RCB_RING_SIZE(tp) <<
6016                         BDINFO_FLAGS_MAXLEN_SHIFT),
6017                        0);
6018
6019         tp->rx_std_ptr = tp->rx_pending;
6020         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6021                      tp->rx_std_ptr);
6022
6023         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6024                                                 tp->rx_jumbo_pending : 0;
6025         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6026                      tp->rx_jumbo_ptr);
6027
6028         /* Initialize MAC address and backoff seed. */
6029         __tg3_set_mac_addr(tp);
6030
6031         /* MTU + ethernet header + FCS + optional VLAN tag */
6032         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
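             /* With the default 1500-byte MTU this is 1500 + 14 (ETH_HLEN) + 8
              * = 1522 bytes, the largest VLAN-tagged frame including the FCS.
              */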
6033
6034         /* The slot time is changed by tg3_setup_phy if we
6035          * run at gigabit with half duplex.
6036          */
6037         tw32(MAC_TX_LENGTHS,
6038              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6039              (6 << TX_LENGTHS_IPG_SHIFT) |
6040              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6041
6042         /* Receive rules. */
6043         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6044         tw32(RCVLPC_CONFIG, 0x0181);
6045
6046         /* Calculate the RDMAC_MODE setting early; we need it to determine
6047          * the RCVLPC_STATS_ENABLE mask.
6048          */
6049         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6050                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6051                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6052                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6053                       RDMAC_MODE_LNGREAD_ENAB);
6054         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6055                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6056
6057         /* If statement applies to 5705 and 5750 PCI devices only */
6058         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6059              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6060             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6061                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6062                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6063                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6064                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6065                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6066                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6067                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6068                 }
6069         }
6070
6071         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6072                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6073
6074 #if TG3_TSO_SUPPORT != 0
6075         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6076                 rdmac_mode |= (1 << 27);
6077 #endif
6078
6079         /* Receive/send statistics. */
6080         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6081             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6082                 val = tr32(RCVLPC_STATS_ENABLE);
6083                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6084                 tw32(RCVLPC_STATS_ENABLE, val);
6085         } else {
6086                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6087         }
6088         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6089         tw32(SNDDATAI_STATSENAB, 0xffffff);
6090         tw32(SNDDATAI_STATSCTRL,
6091              (SNDDATAI_SCTRL_ENABLE |
6092               SNDDATAI_SCTRL_FASTUPD));
6093
6094         /* Setup host coalescing engine. */
6095         tw32(HOSTCC_MODE, 0);
6096         for (i = 0; i < 2000; i++) {
6097                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6098                         break;
6099                 udelay(10);
6100         }
6101
6102         __tg3_set_coalesce(tp, &tp->coal);
6103
6104         /* set status block DMA address */
6105         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6106              ((u64) tp->status_mapping >> 32));
6107         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6108              ((u64) tp->status_mapping & 0xffffffff));
6109
6110         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6111                 /* Status/statistics block address.  See tg3_timer,
6112                  * the tg3_periodic_fetch_stats call there, and
6113                  * tg3_get_stats to see how this works for 5705/5750 chips.
6114                  */
6115                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6116                      ((u64) tp->stats_mapping >> 32));
6117                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6118                      ((u64) tp->stats_mapping & 0xffffffff));
6119                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6120                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6121         }
6122
6123         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6124
6125         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6126         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6127         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6128                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6129
6130         /* Clear statistics/status block in chip, and status block in ram. */
6131         for (i = NIC_SRAM_STATS_BLK;
6132              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6133              i += sizeof(u32)) {
6134                 tg3_write_mem(tp, i, 0);
6135                 udelay(40);
6136         }
6137         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6138
6139         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6140                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6141                 /* reset to prevent losing 1st rx packet intermittently */
6142                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6143                 udelay(10);
6144         }
6145
6146         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6147                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6148         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6149         udelay(40);
6150
6151         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6152          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6153          * register to preserve the GPIO settings for LOMs. The GPIOs,
6154          * whether used as inputs or outputs, are set by boot code after
6155          * reset.
6156          */
6157         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6158                 u32 gpio_mask;
6159
6160                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6161                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6162
6163                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6164                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6165                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6166
6167                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6168                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6169
6170                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6171
6172                 /* GPIO1 must be driven high for eeprom write protect */
6173                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6174                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6175         }
6176         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6177         udelay(100);
6178
6179         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6180         tp->last_tag = 0;
6181
6182         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6183                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6184                 udelay(40);
6185         }
6186
6187         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6188                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6189                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6190                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6191                WDMAC_MODE_LNGREAD_ENAB);
6192
6193         /* If statement applies to 5705 and 5750 PCI devices only */
6194         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6195              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6196             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6197                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6198                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6199                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6200                         /* nothing */
6201                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6202                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6203                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6204                         val |= WDMAC_MODE_RX_ACCEL;
6205                 }
6206         }
6207
6208         /* Enable host coalescing bug fix */
6209         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6210             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6211                 val |= (1 << 29);
6212
6213         tw32_f(WDMAC_MODE, val);
6214         udelay(40);
6215
6216         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6217                 val = tr32(TG3PCI_X_CAPS);
6218                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6219                         val &= ~PCIX_CAPS_BURST_MASK;
6220                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6221                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6222                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6223                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6224                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6225                                 val |= (tp->split_mode_max_reqs <<
6226                                         PCIX_CAPS_SPLIT_SHIFT);
6227                 }
6228                 tw32(TG3PCI_X_CAPS, val);
6229         }
6230
6231         tw32_f(RDMAC_MODE, rdmac_mode);
6232         udelay(40);
6233
6234         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6235         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6236                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6237         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6238         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6239         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6240         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6241         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6242 #if TG3_TSO_SUPPORT != 0
6243         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6244                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6245 #endif
6246         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6247         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6248
6249         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6250                 err = tg3_load_5701_a0_firmware_fix(tp);
6251                 if (err)
6252                         return err;
6253         }
6254
6255 #if TG3_TSO_SUPPORT != 0
6256         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6257                 err = tg3_load_tso_firmware(tp);
6258                 if (err)
6259                         return err;
6260         }
6261 #endif
6262
6263         tp->tx_mode = TX_MODE_ENABLE;
6264         tw32_f(MAC_TX_MODE, tp->tx_mode);
6265         udelay(100);
6266
6267         tp->rx_mode = RX_MODE_ENABLE;
6268         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6269                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6270
6271         tw32_f(MAC_RX_MODE, tp->rx_mode);
6272         udelay(10);
6273
6274         if (tp->link_config.phy_is_low_power) {
6275                 tp->link_config.phy_is_low_power = 0;
6276                 tp->link_config.speed = tp->link_config.orig_speed;
6277                 tp->link_config.duplex = tp->link_config.orig_duplex;
6278                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6279         }
6280
6281         tp->mi_mode = MAC_MI_MODE_BASE;
6282         tw32_f(MAC_MI_MODE, tp->mi_mode);
6283         udelay(80);
6284
6285         tw32(MAC_LED_CTRL, tp->led_ctrl);
6286
6287         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6288         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6289                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6290                 udelay(10);
6291         }
6292         tw32_f(MAC_RX_MODE, tp->rx_mode);
6293         udelay(10);
6294
6295         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6296                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6297                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6298                         /* Set drive transmission level to 1.2V only
6299                          * if the signal pre-emphasis bit is not set.  */
6300                         val = tr32(MAC_SERDES_CFG);
6301                         val &= 0xfffff000;
6302                         val |= 0x880;
6303                         tw32(MAC_SERDES_CFG, val);
6304                 }
6305                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6306                         tw32(MAC_SERDES_CFG, 0x616000);
6307         }
6308
6309         /* Prevent chip from dropping frames when flow control
6310          * is enabled.
6311          */
6312         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6313
6314         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6315             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6316                 /* Use hardware link auto-negotiation */
6317                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6318         }
6319
6320         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6321             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6322                 u32 tmp;
6323
6324                 tmp = tr32(SERDES_RX_CTRL);
6325                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6326                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6327                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6328                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6329         }
6330
6331         err = tg3_setup_phy(tp, 1);
6332         if (err)
6333                 return err;
6334
6335         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6336                 u32 tmp;
6337
6338                 /* Clear CRC stats. */
6339                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6340                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6341                         tg3_readphy(tp, 0x14, &tmp);
6342                 }
6343         }
6344
6345         __tg3_set_rx_mode(tp->dev);
6346
6347         /* Initialize receive rules. */
6348         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6349         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6350         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6351         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6352
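        /* The chip exposes a bank of programmable receive rules
         * (MAC_RCV_RULE_n / MAC_RCV_VALUE_n).  Rules 0 and 1 are set up
         * above; everything beyond the per-chip limit is cleared below.
         * 5705-class parts (other than the 5780 family) only implement
         * 8 rules, older parts implement 16, and with ASF enabled the
         * last 4 rules are left untouched, presumably because the
         * management firmware owns them.
         */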
6353         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6354             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6355                 limit = 8;
6356         else
6357                 limit = 16;
6358         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6359                 limit -= 4;
6360         switch (limit) {
6361         case 16:
6362                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6363         case 15:
6364                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6365         case 14:
6366                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6367         case 13:
6368                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6369         case 12:
6370                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6371         case 11:
6372                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6373         case 10:
6374                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6375         case 9:
6376                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6377         case 8:
6378                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6379         case 7:
6380                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6381         case 6:
6382                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6383         case 5:
6384                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6385         case 4:
6386                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6387         case 3:
6388                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6389         case 2:
6390         case 1:
6391
6392         default:
6393                 break;
6394         }
6395
6396         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6397
6398         return 0;
6399 }
6400
6401 /* Called at device open time to get the chip ready for
6402  * packet processing.  Invoked with tp->lock held.
6403  */
6404 static int tg3_init_hw(struct tg3 *tp)
6405 {
6406         int err;
6407
6408         /* Force the chip into D0. */
6409         err = tg3_set_power_state(tp, PCI_D0);
6410         if (err)
6411                 goto out;
6412
6413         tg3_switch_clocks(tp);
6414
6415         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6416
6417         err = tg3_reset_hw(tp);
6418
6419 out:
6420         return err;
6421 }
6422
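/* Fold a 32-bit hardware statistics register into a 64-bit software
 * counter.  The unsigned compare after the addition detects wrap-around
 * of ->low and propagates a carry into ->high; e.g. low=0xfffffff0 plus
 * __val=0x20 leaves low=0x10 < __val, so high is incremented.
 */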
6423 #define TG3_STAT_ADD32(PSTAT, REG) \
6424 do {    u32 __val = tr32(REG); \
6425         (PSTAT)->low += __val; \
6426         if ((PSTAT)->low < __val) \
6427                 (PSTAT)->high += 1; \
6428 } while (0)
6429
6430 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6431 {
6432         struct tg3_hw_stats *sp = tp->hw_stats;
6433
6434         if (!netif_carrier_ok(tp->dev))
6435                 return;
6436
6437         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6438         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6439         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6440         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6441         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6442         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6443         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6444         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6445         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6446         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6447         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6448         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6449         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6450
6451         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6452         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6453         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6454         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6455         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6456         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6457         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6458         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6459         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6460         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6461         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6462         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6463         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6464         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6465 }
6466
6467 static void tg3_timer(unsigned long __opaque)
6468 {
6469         struct tg3 *tp = (struct tg3 *) __opaque;
6470
6471         if (tp->irq_sync)
6472                 goto restart_timer;
6473
6474         spin_lock(&tp->lock);
6475
6476         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6477                 /* All of this garbage is needed because, when using
6478                  * non-tagged IRQ status, the mailbox/status_block protocol
6479                  * the chip uses with the CPU is race prone.
6480                  */
6481                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6482                         tw32(GRC_LOCAL_CTRL,
6483                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6484                 } else {
6485                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6486                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6487                 }
6488
6489                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6490                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6491                         spin_unlock(&tp->lock);
6492                         schedule_work(&tp->reset_task);
6493                         return;
6494                 }
6495         }
6496
6497         /* This part only runs once per second. */
6498         if (!--tp->timer_counter) {
6499                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6500                         tg3_periodic_fetch_stats(tp);
6501
6502                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6503                         u32 mac_stat;
6504                         int phy_event;
6505
6506                         mac_stat = tr32(MAC_STATUS);
6507
6508                         phy_event = 0;
6509                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6510                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6511                                         phy_event = 1;
6512                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6513                                 phy_event = 1;
6514
6515                         if (phy_event)
6516                                 tg3_setup_phy(tp, 0);
6517                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6518                         u32 mac_stat = tr32(MAC_STATUS);
6519                         int need_setup = 0;
6520
6521                         if (netif_carrier_ok(tp->dev) &&
6522                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6523                                 need_setup = 1;
6524                         }
6525                         if (!netif_carrier_ok(tp->dev) &&
6526                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6527                                          MAC_STATUS_SIGNAL_DET))) {
6528                                 need_setup = 1;
6529                         }
6530                         if (need_setup) {
6531                                 tw32_f(MAC_MODE,
6532                                      (tp->mac_mode &
6533                                       ~MAC_MODE_PORT_MODE_MASK));
6534                                 udelay(40);
6535                                 tw32_f(MAC_MODE, tp->mac_mode);
6536                                 udelay(40);
6537                                 tg3_setup_phy(tp, 0);
6538                         }
6539                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6540                         tg3_serdes_parallel_detect(tp);
6541
6542                 tp->timer_counter = tp->timer_multiplier;
6543         }
6544
6545         /* Heartbeat is only sent once every 2 seconds.  */
6546         if (!--tp->asf_counter) {
6547                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6548                         u32 val;
6549
6550                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6551                                       FWCMD_NICDRV_ALIVE2);
6552                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6553                         /* 5 second timeout */
6554                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6555                         val = tr32(GRC_RX_CPU_EVENT);
6556                         val |= (1 << 14);
6557                         tw32(GRC_RX_CPU_EVENT, val);
6558                 }
6559                 tp->asf_counter = tp->asf_multiplier;
6560         }
6561
6562         spin_unlock(&tp->lock);
6563
6564 restart_timer:
6565         tp->timer.expires = jiffies + tp->timer_offset;
6566         add_timer(&tp->timer);
6567 }
6568
6569 static int tg3_request_irq(struct tg3 *tp)
6570 {
6571         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6572         unsigned long flags;
6573         struct net_device *dev = tp->dev;
6574
6575         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6576                 fn = tg3_msi;
6577                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6578                         fn = tg3_msi_1shot;
6579                 flags = SA_SAMPLE_RANDOM;
6580         } else {
6581                 fn = tg3_interrupt;
6582                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6583                         fn = tg3_interrupt_tagged;
6584                 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6585         }
6586         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6587 }
6588
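/* Interrupt self-test: temporarily install tg3_test_isr, ask host
 * coalescing to deliver a status block immediately (HOSTCC_MODE_NOW),
 * then poll the interrupt mailbox for up to ~50ms (5 x 10ms).  A
 * non-zero mailbox value should indicate the test handler actually ran
 * before the normal handler is restored.
 */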
6589 static int tg3_test_interrupt(struct tg3 *tp)
6590 {
6591         struct net_device *dev = tp->dev;
6592         int err, i;
6593         u32 int_mbox = 0;
6594
6595         if (!netif_running(dev))
6596                 return -ENODEV;
6597
6598         tg3_disable_ints(tp);
6599
6600         free_irq(tp->pdev->irq, dev);
6601
6602         err = request_irq(tp->pdev->irq, tg3_test_isr,
6603                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6604         if (err)
6605                 return err;
6606
6607         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6608         tg3_enable_ints(tp);
6609
6610         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6611                HOSTCC_MODE_NOW);
6612
6613         for (i = 0; i < 5; i++) {
6614                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6615                                         TG3_64BIT_REG_LOW);
6616                 if (int_mbox != 0)
6617                         break;
6618                 msleep(10);
6619         }
6620
6621         tg3_disable_ints(tp);
6622
6623         free_irq(tp->pdev->irq, dev);
6624
6625         err = tg3_request_irq(tp);
6626
6627         if (err)
6628                 return err;
6629
6630         if (int_mbox != 0)
6631                 return 0;
6632
6633         return -EIO;
6634 }
6635
6636 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6637  * INTx mode is successfully restored.
6638  */
6639 static int tg3_test_msi(struct tg3 *tp)
6640 {
6641         struct net_device *dev = tp->dev;
6642         int err;
6643         u16 pci_cmd;
6644
6645         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6646                 return 0;
6647
6648         /* Turn off SERR reporting in case MSI terminates with Master
6649          * Abort.
6650          */
6651         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6652         pci_write_config_word(tp->pdev, PCI_COMMAND,
6653                               pci_cmd & ~PCI_COMMAND_SERR);
6654
6655         err = tg3_test_interrupt(tp);
6656
6657         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6658
6659         if (!err)
6660                 return 0;
6661
6662         /* other failures */
6663         if (err != -EIO)
6664                 return err;
6665
6666         /* MSI test failed, go back to INTx mode */
6667         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6668                "switching to INTx mode. Please report this failure to "
6669                "the PCI maintainer and include system chipset information.\n",
6670                        tp->dev->name);
6671
6672         free_irq(tp->pdev->irq, dev);
6673         pci_disable_msi(tp->pdev);
6674
6675         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6676
6677         err = tg3_request_irq(tp);
6678         if (err)
6679                 return err;
6680
6681         /* Need to reset the chip because the MSI cycle may have terminated
6682          * with Master Abort.
6683          */
6684         tg3_full_lock(tp, 1);
6685
6686         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6687         err = tg3_init_hw(tp);
6688
6689         tg3_full_unlock(tp);
6690
6691         if (err)
6692                 free_irq(tp->pdev->irq, dev);
6693
6694         return err;
6695 }
6696
6697 static int tg3_open(struct net_device *dev)
6698 {
6699         struct tg3 *tp = netdev_priv(dev);
6700         int err;
6701
6702         tg3_full_lock(tp, 0);
6703
6704         err = tg3_set_power_state(tp, PCI_D0);
6705         if (err)
6706                 return err;
6707
6708         tg3_disable_ints(tp);
6709         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6710
6711         tg3_full_unlock(tp);
6712
6713         /* The placement of this call is tied
6714          * to the setup and use of Host TX descriptors.
6715          */
6716         err = tg3_alloc_consistent(tp);
6717         if (err)
6718                 return err;
6719
6720         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6721             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6722             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6723             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6724               (tp->pdev_peer == tp->pdev))) {
6725                 /* All MSI supporting chips should support tagged
6726                  * status.  Assert that this is the case.
6727                  */
6728                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6729                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6730                                "Not using MSI.\n", tp->dev->name);
6731                 } else if (pci_enable_msi(tp->pdev) == 0) {
6732                         u32 msi_mode;
6733
6734                         msi_mode = tr32(MSGINT_MODE);
6735                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6736                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6737                 }
6738         }
6739         err = tg3_request_irq(tp);
6740
6741         if (err) {
6742                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6743                         pci_disable_msi(tp->pdev);
6744                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6745                 }
6746                 tg3_free_consistent(tp);
6747                 return err;
6748         }
6749
6750         tg3_full_lock(tp, 0);
6751
6752         err = tg3_init_hw(tp);
6753         if (err) {
6754                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6755                 tg3_free_rings(tp);
6756         } else {
6757                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6758                         tp->timer_offset = HZ;
6759                 else
6760                         tp->timer_offset = HZ / 10;
6761
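                /* tg3_timer() fires every timer_offset jiffies: once per
                 * second with tagged status, ten times per second
                 * otherwise.  timer_multiplier = HZ / timer_offset keeps
                 * the once-per-second work in tg3_timer() at a 1s cadence
                 * either way, and asf_multiplier doubles that for the 2s
                 * ASF heartbeat.
                 */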
6762                 BUG_ON(tp->timer_offset > HZ);
6763                 tp->timer_counter = tp->timer_multiplier =
6764                         (HZ / tp->timer_offset);
6765                 tp->asf_counter = tp->asf_multiplier =
6766                         ((HZ / tp->timer_offset) * 2);
6767
6768                 init_timer(&tp->timer);
6769                 tp->timer.expires = jiffies + tp->timer_offset;
6770                 tp->timer.data = (unsigned long) tp;
6771                 tp->timer.function = tg3_timer;
6772         }
6773
6774         tg3_full_unlock(tp);
6775
6776         if (err) {
6777                 free_irq(tp->pdev->irq, dev);
6778                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6779                         pci_disable_msi(tp->pdev);
6780                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6781                 }
6782                 tg3_free_consistent(tp);
6783                 return err;
6784         }
6785
6786         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6787                 err = tg3_test_msi(tp);
6788
6789                 if (err) {
6790                         tg3_full_lock(tp, 0);
6791
6792                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6793                                 pci_disable_msi(tp->pdev);
6794                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6795                         }
6796                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6797                         tg3_free_rings(tp);
6798                         tg3_free_consistent(tp);
6799
6800                         tg3_full_unlock(tp);
6801
6802                         return err;
6803                 }
6804
6805                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6806                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6807                                 u32 val = tr32(0x7c04);
6808
6809                                 tw32(0x7c04, val | (1 << 29));
6810                         }
6811                 }
6812         }
6813
6814         tg3_full_lock(tp, 0);
6815
6816         add_timer(&tp->timer);
6817         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6818         tg3_enable_ints(tp);
6819
6820         tg3_full_unlock(tp);
6821
6822         netif_start_queue(dev);
6823
6824         return 0;
6825 }
6826
6827 #if 0
6828 /*static*/ void tg3_dump_state(struct tg3 *tp)
6829 {
6830         u32 val32, val32_2, val32_3, val32_4, val32_5;
6831         u16 val16;
6832         int i;
6833
6834         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6835         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6836         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6837                val16, val32);
6838
6839         /* MAC block */
6840         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6841                tr32(MAC_MODE), tr32(MAC_STATUS));
6842         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6843                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6844         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6845                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6846         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6847                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6848
6849         /* Send data initiator control block */
6850         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6851                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6852         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6853                tr32(SNDDATAI_STATSCTRL));
6854
6855         /* Send data completion control block */
6856         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6857
6858         /* Send BD ring selector block */
6859         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6860                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6861
6862         /* Send BD initiator control block */
6863         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6864                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6865
6866         /* Send BD completion control block */
6867         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6868
6869         /* Receive list placement control block */
6870         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6871                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6872         printk("       RCVLPC_STATSCTRL[%08x]\n",
6873                tr32(RCVLPC_STATSCTRL));
6874
6875         /* Receive data and receive BD initiator control block */
6876         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6877                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6878
6879         /* Receive data completion control block */
6880         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6881                tr32(RCVDCC_MODE));
6882
6883         /* Receive BD initiator control block */
6884         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6885                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6886
6887         /* Receive BD completion control block */
6888         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6889                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6890
6891         /* Receive list selector control block */
6892         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6893                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6894
6895         /* Mbuf cluster free block */
6896         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6897                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6898
6899         /* Host coalescing control block */
6900         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6901                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6902         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6903                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6904                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6905         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6906                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6907                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6908         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6909                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6910         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6911                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6912
6913         /* Memory arbiter control block */
6914         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6915                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6916
6917         /* Buffer manager control block */
6918         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6919                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6920         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6921                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6922         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6923                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6924                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6925                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6926
6927         /* Read DMA control block */
6928         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6929                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6930
6931         /* Write DMA control block */
6932         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6933                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6934
6935         /* DMA completion block */
6936         printk("DEBUG: DMAC_MODE[%08x]\n",
6937                tr32(DMAC_MODE));
6938
6939         /* GRC block */
6940         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6941                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6942         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6943                tr32(GRC_LOCAL_CTRL));
6944
6945         /* TG3_BDINFOs */
6946         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6947                tr32(RCVDBDI_JUMBO_BD + 0x0),
6948                tr32(RCVDBDI_JUMBO_BD + 0x4),
6949                tr32(RCVDBDI_JUMBO_BD + 0x8),
6950                tr32(RCVDBDI_JUMBO_BD + 0xc));
6951         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6952                tr32(RCVDBDI_STD_BD + 0x0),
6953                tr32(RCVDBDI_STD_BD + 0x4),
6954                tr32(RCVDBDI_STD_BD + 0x8),
6955                tr32(RCVDBDI_STD_BD + 0xc));
6956         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6957                tr32(RCVDBDI_MINI_BD + 0x0),
6958                tr32(RCVDBDI_MINI_BD + 0x4),
6959                tr32(RCVDBDI_MINI_BD + 0x8),
6960                tr32(RCVDBDI_MINI_BD + 0xc));
6961
6962         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6963         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6964         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6965         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6966         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6967                val32, val32_2, val32_3, val32_4);
6968
6969         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6970         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6971         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6972         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6973         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6974                val32, val32_2, val32_3, val32_4);
6975
6976         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6977         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6978         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6979         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6980         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6981         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6982                val32, val32_2, val32_3, val32_4, val32_5);
6983
6984         /* SW status block */
6985         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6986                tp->hw_status->status,
6987                tp->hw_status->status_tag,
6988                tp->hw_status->rx_jumbo_consumer,
6989                tp->hw_status->rx_consumer,
6990                tp->hw_status->rx_mini_consumer,
6991                tp->hw_status->idx[0].rx_producer,
6992                tp->hw_status->idx[0].tx_consumer);
6993
6994         /* SW statistics block */
6995         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6996                ((u32 *)tp->hw_stats)[0],
6997                ((u32 *)tp->hw_stats)[1],
6998                ((u32 *)tp->hw_stats)[2],
6999                ((u32 *)tp->hw_stats)[3]);
7000
7001         /* Mailboxes */
7002         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7003                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7004                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7005                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7006                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7007
7008         /* NIC side send descriptors. */
7009         for (i = 0; i < 6; i++) {
7010                 unsigned long txd;
7011
7012                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7013                         + (i * sizeof(struct tg3_tx_buffer_desc));
7014                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7015                        i,
7016                        readl(txd + 0x0), readl(txd + 0x4),
7017                        readl(txd + 0x8), readl(txd + 0xc));
7018         }
7019
7020         /* NIC side RX descriptors. */
7021         for (i = 0; i < 6; i++) {
7022                 unsigned long rxd;
7023
7024                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7025                         + (i * sizeof(struct tg3_rx_buffer_desc));
7026                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7027                        i,
7028                        readl(rxd + 0x0), readl(rxd + 0x4),
7029                        readl(rxd + 0x8), readl(rxd + 0xc));
7030                 rxd += (4 * sizeof(u32));
7031                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7032                        i,
7033                        readl(rxd + 0x0), readl(rxd + 0x4),
7034                        readl(rxd + 0x8), readl(rxd + 0xc));
7035         }
7036
7037         for (i = 0; i < 6; i++) {
7038                 unsigned long rxd;
7039
7040                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7041                         + (i * sizeof(struct tg3_rx_buffer_desc));
7042                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7043                        i,
7044                        readl(rxd + 0x0), readl(rxd + 0x4),
7045                        readl(rxd + 0x8), readl(rxd + 0xc));
7046                 rxd += (4 * sizeof(u32));
7047                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7048                        i,
7049                        readl(rxd + 0x0), readl(rxd + 0x4),
7050                        readl(rxd + 0x8), readl(rxd + 0xc));
7051         }
7052 }
7053 #endif
7054
7055 static struct net_device_stats *tg3_get_stats(struct net_device *);
7056 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7057
7058 static int tg3_close(struct net_device *dev)
7059 {
7060         struct tg3 *tp = netdev_priv(dev);
7061
7062         /* Calling flush_scheduled_work() may deadlock because
7063          * linkwatch_event() may be on the workqueue and it will try to get
7064          * the rtnl_lock, which we are holding.
7065          */
7066         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7067                 msleep(1);
7068
7069         netif_stop_queue(dev);
7070
7071         del_timer_sync(&tp->timer);
7072
7073         tg3_full_lock(tp, 1);
7074 #if 0
7075         tg3_dump_state(tp);
7076 #endif
7077
7078         tg3_disable_ints(tp);
7079
7080         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7081         tg3_free_rings(tp);
7082         tp->tg3_flags &=
7083                 ~(TG3_FLAG_INIT_COMPLETE |
7084                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7085
7086         tg3_full_unlock(tp);
7087
7088         free_irq(tp->pdev->irq, dev);
7089         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7090                 pci_disable_msi(tp->pdev);
7091                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7092         }
7093
7094         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7095                sizeof(tp->net_stats_prev));
7096         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7097                sizeof(tp->estats_prev));
7098
7099         tg3_free_consistent(tp);
7100
7101         tg3_set_power_state(tp, PCI_D3hot);
7102
7103         netif_carrier_off(tp->dev);
7104
7105         return 0;
7106 }
7107
7108 static inline unsigned long get_stat64(tg3_stat64_t *val)
7109 {
7110         unsigned long ret;
7111
7112 #if (BITS_PER_LONG == 32)
7113         ret = val->low;
7114 #else
7115         ret = ((u64)val->high << 32) | ((u64)val->low);
7116 #endif
7117         return ret;
7118 }
7119
7120 static unsigned long calc_crc_errors(struct tg3 *tp)
7121 {
7122         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7123
7124         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7125             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7126              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7127                 u32 val;
7128
7129                 spin_lock_bh(&tp->lock);
7130                 if (!tg3_readphy(tp, 0x1e, &val)) {
7131                         tg3_writephy(tp, 0x1e, val | 0x8000);
7132                         tg3_readphy(tp, 0x14, &val);
7133                 } else
7134                         val = 0;
7135                 spin_unlock_bh(&tp->lock);
7136
7137                 tp->phy_crc_errors += val;
7138
7139                 return tp->phy_crc_errors;
7140         }
7141
7142         return get_stat64(&hw_stats->rx_fcs_errors);
7143 }
7144
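/* Each ethtool statistic is reported as the snapshot saved at the last
 * tg3_close() (estats_prev) plus the live hardware counter, so the
 * numbers keep accumulating across an ifdown/ifup cycle.
 */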
7145 #define ESTAT_ADD(member) \
7146         estats->member =        old_estats->member + \
7147                                 get_stat64(&hw_stats->member)
7148
7149 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7150 {
7151         struct tg3_ethtool_stats *estats = &tp->estats;
7152         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7153         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7154
7155         if (!hw_stats)
7156                 return old_estats;
7157
7158         ESTAT_ADD(rx_octets);
7159         ESTAT_ADD(rx_fragments);
7160         ESTAT_ADD(rx_ucast_packets);
7161         ESTAT_ADD(rx_mcast_packets);
7162         ESTAT_ADD(rx_bcast_packets);
7163         ESTAT_ADD(rx_fcs_errors);
7164         ESTAT_ADD(rx_align_errors);
7165         ESTAT_ADD(rx_xon_pause_rcvd);
7166         ESTAT_ADD(rx_xoff_pause_rcvd);
7167         ESTAT_ADD(rx_mac_ctrl_rcvd);
7168         ESTAT_ADD(rx_xoff_entered);
7169         ESTAT_ADD(rx_frame_too_long_errors);
7170         ESTAT_ADD(rx_jabbers);
7171         ESTAT_ADD(rx_undersize_packets);
7172         ESTAT_ADD(rx_in_length_errors);
7173         ESTAT_ADD(rx_out_length_errors);
7174         ESTAT_ADD(rx_64_or_less_octet_packets);
7175         ESTAT_ADD(rx_65_to_127_octet_packets);
7176         ESTAT_ADD(rx_128_to_255_octet_packets);
7177         ESTAT_ADD(rx_256_to_511_octet_packets);
7178         ESTAT_ADD(rx_512_to_1023_octet_packets);
7179         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7180         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7181         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7182         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7183         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7184
7185         ESTAT_ADD(tx_octets);
7186         ESTAT_ADD(tx_collisions);
7187         ESTAT_ADD(tx_xon_sent);
7188         ESTAT_ADD(tx_xoff_sent);
7189         ESTAT_ADD(tx_flow_control);
7190         ESTAT_ADD(tx_mac_errors);
7191         ESTAT_ADD(tx_single_collisions);
7192         ESTAT_ADD(tx_mult_collisions);
7193         ESTAT_ADD(tx_deferred);
7194         ESTAT_ADD(tx_excessive_collisions);
7195         ESTAT_ADD(tx_late_collisions);
7196         ESTAT_ADD(tx_collide_2times);
7197         ESTAT_ADD(tx_collide_3times);
7198         ESTAT_ADD(tx_collide_4times);
7199         ESTAT_ADD(tx_collide_5times);
7200         ESTAT_ADD(tx_collide_6times);
7201         ESTAT_ADD(tx_collide_7times);
7202         ESTAT_ADD(tx_collide_8times);
7203         ESTAT_ADD(tx_collide_9times);
7204         ESTAT_ADD(tx_collide_10times);
7205         ESTAT_ADD(tx_collide_11times);
7206         ESTAT_ADD(tx_collide_12times);
7207         ESTAT_ADD(tx_collide_13times);
7208         ESTAT_ADD(tx_collide_14times);
7209         ESTAT_ADD(tx_collide_15times);
7210         ESTAT_ADD(tx_ucast_packets);
7211         ESTAT_ADD(tx_mcast_packets);
7212         ESTAT_ADD(tx_bcast_packets);
7213         ESTAT_ADD(tx_carrier_sense_errors);
7214         ESTAT_ADD(tx_discards);
7215         ESTAT_ADD(tx_errors);
7216
7217         ESTAT_ADD(dma_writeq_full);
7218         ESTAT_ADD(dma_write_prioq_full);
7219         ESTAT_ADD(rxbds_empty);
7220         ESTAT_ADD(rx_discards);
7221         ESTAT_ADD(rx_errors);
7222         ESTAT_ADD(rx_threshold_hit);
7223
7224         ESTAT_ADD(dma_readq_full);
7225         ESTAT_ADD(dma_read_prioq_full);
7226         ESTAT_ADD(tx_comp_queue_full);
7227
7228         ESTAT_ADD(ring_set_send_prod_index);
7229         ESTAT_ADD(ring_status_update);
7230         ESTAT_ADD(nic_irqs);
7231         ESTAT_ADD(nic_avoided_irqs);
7232         ESTAT_ADD(nic_tx_threshold_hit);
7233
7234         return estats;
7235 }
7236
7237 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7238 {
7239         struct tg3 *tp = netdev_priv(dev);
7240         struct net_device_stats *stats = &tp->net_stats;
7241         struct net_device_stats *old_stats = &tp->net_stats_prev;
7242         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7243
7244         if (!hw_stats)
7245                 return old_stats;
7246
7247         stats->rx_packets = old_stats->rx_packets +
7248                 get_stat64(&hw_stats->rx_ucast_packets) +
7249                 get_stat64(&hw_stats->rx_mcast_packets) +
7250                 get_stat64(&hw_stats->rx_bcast_packets);
7251
7252         stats->tx_packets = old_stats->tx_packets +
7253                 get_stat64(&hw_stats->tx_ucast_packets) +
7254                 get_stat64(&hw_stats->tx_mcast_packets) +
7255                 get_stat64(&hw_stats->tx_bcast_packets);
7256
7257         stats->rx_bytes = old_stats->rx_bytes +
7258                 get_stat64(&hw_stats->rx_octets);
7259         stats->tx_bytes = old_stats->tx_bytes +
7260                 get_stat64(&hw_stats->tx_octets);
7261
7262         stats->rx_errors = old_stats->rx_errors +
7263                 get_stat64(&hw_stats->rx_errors);
7264         stats->tx_errors = old_stats->tx_errors +
7265                 get_stat64(&hw_stats->tx_errors) +
7266                 get_stat64(&hw_stats->tx_mac_errors) +
7267                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7268                 get_stat64(&hw_stats->tx_discards);
7269
7270         stats->multicast = old_stats->multicast +
7271                 get_stat64(&hw_stats->rx_mcast_packets);
7272         stats->collisions = old_stats->collisions +
7273                 get_stat64(&hw_stats->tx_collisions);
7274
7275         stats->rx_length_errors = old_stats->rx_length_errors +
7276                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7277                 get_stat64(&hw_stats->rx_undersize_packets);
7278
7279         stats->rx_over_errors = old_stats->rx_over_errors +
7280                 get_stat64(&hw_stats->rxbds_empty);
7281         stats->rx_frame_errors = old_stats->rx_frame_errors +
7282                 get_stat64(&hw_stats->rx_align_errors);
7283         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7284                 get_stat64(&hw_stats->tx_discards);
7285         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7286                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7287
7288         stats->rx_crc_errors = old_stats->rx_crc_errors +
7289                 calc_crc_errors(tp);
7290
7291         stats->rx_missed_errors = old_stats->rx_missed_errors +
7292                 get_stat64(&hw_stats->rx_discards);
7293
7294         return stats;
7295 }
7296
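/* Bit-wise CRC-32 using the reflected polynomial 0xedb88320, used below
 * to compute the multicast hash filter bits; see __tg3_set_rx_mode().
 */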
7297 static inline u32 calc_crc(unsigned char *buf, int len)
7298 {
7299         u32 reg;
7300         u32 tmp;
7301         int j, k;
7302
7303         reg = 0xffffffff;
7304
7305         for (j = 0; j < len; j++) {
7306                 reg ^= buf[j];
7307
7308                 for (k = 0; k < 8; k++) {
7309                         tmp = reg & 0x01;
7310
7311                         reg >>= 1;
7312
7313                         if (tmp) {
7314                                 reg ^= 0xedb88320;
7315                         }
7316                 }
7317         }
7318
7319         return ~reg;
7320 }
7321
7322 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7323 {
7324         /* accept or reject all multicast frames */
7325         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7326         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7327         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7328         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7329 }
7330
7331 static void __tg3_set_rx_mode(struct net_device *dev)
7332 {
7333         struct tg3 *tp = netdev_priv(dev);
7334         u32 rx_mode;
7335
7336         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7337                                   RX_MODE_KEEP_VLAN_TAG);
7338
7339         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7340          * flag clear.
7341          */
7342 #if TG3_VLAN_TAG_USED
7343         if (!tp->vlgrp &&
7344             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7345                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7346 #else
7347         /* By definition, VLAN is always disabled in this
7348          * case.
7349          */
7350         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7351                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7352 #endif
7353
7354         if (dev->flags & IFF_PROMISC) {
7355                 /* Promiscuous mode. */
7356                 rx_mode |= RX_MODE_PROMISC;
7357         } else if (dev->flags & IFF_ALLMULTI) {
7358                 /* Accept all multicast. */
7359                 tg3_set_multi(tp, 1);
7360         } else if (dev->mc_count < 1) {
7361                 /* Reject all multicast. */
7362                 tg3_set_multi(tp, 0);
7363         } else {
7364                 /* Accept one or more multicast addresses. */
7365                 struct dev_mc_list *mclist;
7366                 unsigned int i;
7367                 u32 mc_filter[4] = { 0, };
7368                 u32 regidx;
7369                 u32 bit;
7370                 u32 crc;
7371
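                /* Hash each multicast address into a 128-bit filter
                 * spread across MAC_HASH_REG_0..3: the low 7 bits of the
                 * inverted CRC select a register (bits 6:5) and a bit
                 * within it (bits 4:0).
                 */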
7372                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7373                      i++, mclist = mclist->next) {
7374
7375                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7376                         bit = ~crc & 0x7f;
7377                         regidx = (bit & 0x60) >> 5;
7378                         bit &= 0x1f;
7379                         mc_filter[regidx] |= (1 << bit);
7380                 }
7381
7382                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7383                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7384                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7385                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7386         }
7387
7388         if (rx_mode != tp->rx_mode) {
7389                 tp->rx_mode = rx_mode;
7390                 tw32_f(MAC_RX_MODE, rx_mode);
7391                 udelay(10);
7392         }
7393 }
7394
7395 static void tg3_set_rx_mode(struct net_device *dev)
7396 {
7397         struct tg3 *tp = netdev_priv(dev);
7398
7399         if (!netif_running(dev))
7400                 return;
7401
7402         tg3_full_lock(tp, 0);
7403         __tg3_set_rx_mode(dev);
7404         tg3_full_unlock(tp);
7405 }
7406
7407 #define TG3_REGDUMP_LEN         (32 * 1024)
7408
7409 static int tg3_get_regs_len(struct net_device *dev)
7410 {
7411         return TG3_REGDUMP_LEN;
7412 }
7413
7414 static void tg3_get_regs(struct net_device *dev,
7415                 struct ethtool_regs *regs, void *_p)
7416 {
7417         u32 *p = _p;
7418         struct tg3 *tp = netdev_priv(dev);
7419         u8 *orig_p = _p;
7420         int i;
7421
7422         regs->version = 0;
7423
7424         memset(p, 0, TG3_REGDUMP_LEN);
7425
7426         if (tp->link_config.phy_is_low_power)
7427                 return;
7428
7429         tg3_full_lock(tp, 0);
7430
7431 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7432 #define GET_REG32_LOOP(base,len)                \
7433 do {    p = (u32 *)(orig_p + (base));           \
7434         for (i = 0; i < len; i += 4)            \
7435                 __GET_REG32((base) + i);        \
7436 } while (0)
7437 #define GET_REG32_1(reg)                        \
7438 do {    p = (u32 *)(orig_p + (reg));            \
7439         __GET_REG32((reg));                     \
7440 } while (0)
7441
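        /* The dump is sparse: each block of registers is copied at its
         * own offset within the zeroed 32KB buffer (p is repositioned to
         * orig_p + base each time), so gaps between blocks read back as
         * zero.
         */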
7442         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7443         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7444         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7445         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7446         GET_REG32_1(SNDDATAC_MODE);
7447         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7448         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7449         GET_REG32_1(SNDBDC_MODE);
7450         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7451         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7452         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7453         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7454         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7455         GET_REG32_1(RCVDCC_MODE);
7456         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7457         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7458         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7459         GET_REG32_1(MBFREE_MODE);
7460         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7461         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7462         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7463         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7464         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7465         GET_REG32_1(RX_CPU_MODE);
7466         GET_REG32_1(RX_CPU_STATE);
7467         GET_REG32_1(RX_CPU_PGMCTR);
7468         GET_REG32_1(RX_CPU_HWBKPT);
7469         GET_REG32_1(TX_CPU_MODE);
7470         GET_REG32_1(TX_CPU_STATE);
7471         GET_REG32_1(TX_CPU_PGMCTR);
7472         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7473         GET_REG32_LOOP(FTQ_RESET, 0x120);
7474         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7475         GET_REG32_1(DMAC_MODE);
7476         GET_REG32_LOOP(GRC_MODE, 0x4c);
7477         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7478                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7479
7480 #undef __GET_REG32
7481 #undef GET_REG32_LOOP
7482 #undef GET_REG32_1
7483
7484         tg3_full_unlock(tp);
7485 }
7486
7487 static int tg3_get_eeprom_len(struct net_device *dev)
7488 {
7489         struct tg3 *tp = netdev_priv(dev);
7490
7491         return tp->nvram_size;
7492 }
7493
7494 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7495 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7496
7497 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7498 {
7499         struct tg3 *tp = netdev_priv(dev);
7500         int ret;
7501         u8  *pd;
7502         u32 i, offset, len, val, b_offset, b_count;
7503
7504         if (tp->link_config.phy_is_low_power)
7505                 return -EAGAIN;
7506
7507         offset = eeprom->offset;
7508         len = eeprom->len;
7509         eeprom->len = 0;
7510
7511         eeprom->magic = TG3_EEPROM_MAGIC;
7512
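        /* NVRAM is read a 32-bit word at a time, so an arbitrary
         * (offset, len) request is split into an unaligned head, a run
         * of whole words, and an unaligned tail.  For example (purely
         * illustrative), offset=1 len=10 becomes: 3 bytes from the word
         * at 0, one whole word at 4, then 3 bytes from the word at 8.
         */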
7513         if (offset & 3) {
7514                 /* adjustments to start on required 4 byte boundary */
7515                 b_offset = offset & 3;
7516                 b_count = 4 - b_offset;
7517                 if (b_count > len) {
7518                         /* i.e. offset=1 len=2 */
7519                         b_count = len;
7520                 }
7521                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7522                 if (ret)
7523                         return ret;
7524                 val = cpu_to_le32(val);
7525                 memcpy(data, ((char*)&val) + b_offset, b_count);
7526                 len -= b_count;
7527                 offset += b_count;
7528                 eeprom->len += b_count;
7529         }
7530
7531         /* read bytes up to the last 4 byte boundary */
7532         pd = &data[eeprom->len];
7533         for (i = 0; i < (len - (len & 3)); i += 4) {
7534                 ret = tg3_nvram_read(tp, offset + i, &val);
7535                 if (ret) {
7536                         eeprom->len += i;
7537                         return ret;
7538                 }
7539                 val = cpu_to_le32(val);
7540                 memcpy(pd + i, &val, 4);
7541         }
7542         eeprom->len += i;
7543
7544         if (len & 3) {
7545                 /* read last bytes not ending on 4 byte boundary */
7546                 pd = &data[eeprom->len];
7547                 b_count = len & 3;
7548                 b_offset = offset + len - b_count;
7549                 ret = tg3_nvram_read(tp, b_offset, &val);
7550                 if (ret)
7551                         return ret;
7552                 val = cpu_to_le32(val);
7553                 memcpy(pd, ((char*)&val), b_count);
7554                 eeprom->len += b_count;
7555         }
7556         return 0;
7557 }
7558
7559 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7560
7561 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7562 {
7563         struct tg3 *tp = netdev_priv(dev);
7564         int ret;
7565         u32 offset, len, b_offset, odd_len, start, end;
7566         u8 *buf;
7567
7568         if (tp->link_config.phy_is_low_power)
7569                 return -EAGAIN;
7570
7571         if (eeprom->magic != TG3_EEPROM_MAGIC)
7572                 return -EINVAL;
7573
7574         offset = eeprom->offset;
7575         len = eeprom->len;
7576
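        /* Writes must also be whole 32-bit words.  If the request is not
         * word aligned at either end, the neighbouring words are read
         * back first and the user data is spliced into a word-aligned
         * bounce buffer before programming NVRAM.
         */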
7577         if ((b_offset = (offset & 3))) {
7578                 /* adjustments to start on required 4 byte boundary */
7579                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7580                 if (ret)
7581                         return ret;
7582                 start = cpu_to_le32(start);
7583                 len += b_offset;
7584                 offset &= ~3;
7585                 if (len < 4)
7586                         len = 4;
7587         }
7588
7589         odd_len = 0;
7590         if (len & 3) {
7591                 /* adjustments to end on required 4 byte boundary */
7592                 odd_len = 1;
7593                 len = (len + 3) & ~3;
7594                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7595                 if (ret)
7596                         return ret;
7597                 end = cpu_to_le32(end);
7598         }
7599
7600         buf = data;
7601         if (b_offset || odd_len) {
7602                 buf = kmalloc(len, GFP_KERNEL);
7603                 if (!buf)
7604                         return -ENOMEM;
7605                 if (b_offset)
7606                         memcpy(buf, &start, 4);
7607                 if (odd_len)
7608                         memcpy(buf+len-4, &end, 4);
7609                 memcpy(buf + b_offset, data, eeprom->len);
7610         }
7611
7612         ret = tg3_nvram_write_block(tp, offset, len, buf);
7613
7614         if (buf != data)
7615                 kfree(buf);
7616
7617         return ret;
7618 }
7619
7620 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7621 {
7622         struct tg3 *tp = netdev_priv(dev);
7623   
7624         cmd->supported = (SUPPORTED_Autoneg);
7625
7626         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7627                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7628                                    SUPPORTED_1000baseT_Full);
7629
7630         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7631                 cmd->supported |= (SUPPORTED_100baseT_Half |
7632                                   SUPPORTED_100baseT_Full |
7633                                   SUPPORTED_10baseT_Half |
7634                                   SUPPORTED_10baseT_Full |
7635                                   SUPPORTED_MII);
7636         else
7637                 cmd->supported |= SUPPORTED_FIBRE;
7638   
7639         cmd->advertising = tp->link_config.advertising;
7640         if (netif_running(dev)) {
7641                 cmd->speed = tp->link_config.active_speed;
7642                 cmd->duplex = tp->link_config.active_duplex;
7643         }
7644         cmd->port = 0;
7645         cmd->phy_address = PHY_ADDR;
7646         cmd->transceiver = 0;
7647         cmd->autoneg = tp->link_config.autoneg;
7648         cmd->maxtxpkt = 0;
7649         cmd->maxrxpkt = 0;
7650         return 0;
7651 }
7652   
7653 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7654 {
7655         struct tg3 *tp = netdev_priv(dev);
7656   
7657         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7658                 /* These are the only advertisement bits allowed.  */
7659                 if (cmd->autoneg == AUTONEG_ENABLE &&
7660                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7661                                           ADVERTISED_1000baseT_Full |
7662                                           ADVERTISED_Autoneg |
7663                                           ADVERTISED_FIBRE)))
7664                         return -EINVAL;
7665                 /* Fiber can only do SPEED_1000.  */
7666                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7667                          (cmd->speed != SPEED_1000))
7668                         return -EINVAL;
7669         /* Copper cannot force SPEED_1000.  */
7670         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7671                    (cmd->speed == SPEED_1000))
7672                 return -EINVAL;
7673         else if ((cmd->speed == SPEED_1000) &&
7674                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7675                 return -EINVAL;
7676
7677         tg3_full_lock(tp, 0);
7678
7679         tp->link_config.autoneg = cmd->autoneg;
7680         if (cmd->autoneg == AUTONEG_ENABLE) {
7681                 tp->link_config.advertising = cmd->advertising;
7682                 tp->link_config.speed = SPEED_INVALID;
7683                 tp->link_config.duplex = DUPLEX_INVALID;
7684         } else {
7685                 tp->link_config.advertising = 0;
7686                 tp->link_config.speed = cmd->speed;
7687                 tp->link_config.duplex = cmd->duplex;
7688         }
7689   
7690         if (netif_running(dev))
7691                 tg3_setup_phy(tp, 1);
7692
7693         tg3_full_unlock(tp);
7694   
7695         return 0;
7696 }
7697   
7698 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7699 {
7700         struct tg3 *tp = netdev_priv(dev);
7701   
7702         strcpy(info->driver, DRV_MODULE_NAME);
7703         strcpy(info->version, DRV_MODULE_VERSION);
7704         strcpy(info->fw_version, tp->fw_ver);
7705         strcpy(info->bus_info, pci_name(tp->pdev));
7706 }
7707   
7708 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7709 {
7710         struct tg3 *tp = netdev_priv(dev);
7711   
7712         wol->supported = WAKE_MAGIC;
7713         wol->wolopts = 0;
7714         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7715                 wol->wolopts = WAKE_MAGIC;
7716         memset(&wol->sopass, 0, sizeof(wol->sopass));
7717 }
7718   
7719 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7720 {
7721         struct tg3 *tp = netdev_priv(dev);
7722   
7723         if (wol->wolopts & ~WAKE_MAGIC)
7724                 return -EINVAL;
7725         if ((wol->wolopts & WAKE_MAGIC) &&
7726             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7727             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7728                 return -EINVAL;
7729   
7730         spin_lock_bh(&tp->lock);
7731         if (wol->wolopts & WAKE_MAGIC)
7732                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7733         else
7734                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7735         spin_unlock_bh(&tp->lock);
7736   
7737         return 0;
7738 }
7739   
7740 static u32 tg3_get_msglevel(struct net_device *dev)
7741 {
7742         struct tg3 *tp = netdev_priv(dev);
7743         return tp->msg_enable;
7744 }
7745   
7746 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7747 {
7748         struct tg3 *tp = netdev_priv(dev);
7749         tp->msg_enable = value;
7750 }
7751   
7752 #if TG3_TSO_SUPPORT != 0
7753 static int tg3_set_tso(struct net_device *dev, u32 value)
7754 {
7755         struct tg3 *tp = netdev_priv(dev);
7756
7757         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7758                 if (value)
7759                         return -EINVAL;
7760                 return 0;
7761         }
7762         return ethtool_op_set_tso(dev, value);
7763 }
7764 #endif
7765   
7766 static int tg3_nway_reset(struct net_device *dev)
7767 {
7768         struct tg3 *tp = netdev_priv(dev);
7769         u32 bmcr;
7770         int r;
7771   
7772         if (!netif_running(dev))
7773                 return -EAGAIN;
7774
7775         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7776                 return -EINVAL;
7777
7778         spin_lock_bh(&tp->lock);
7779         r = -EINVAL;
7780         tg3_readphy(tp, MII_BMCR, &bmcr);
7781         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7782             ((bmcr & BMCR_ANENABLE) ||
7783              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7784                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7785                                            BMCR_ANENABLE);
7786                 r = 0;
7787         }
7788         spin_unlock_bh(&tp->lock);
7789   
7790         return r;
7791 }
7792   
7793 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7794 {
7795         struct tg3 *tp = netdev_priv(dev);
7796   
7797         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7798         ering->rx_mini_max_pending = 0;
7799         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7800                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7801         else
7802                 ering->rx_jumbo_max_pending = 0;
7803
7804         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7805
7806         ering->rx_pending = tp->rx_pending;
7807         ering->rx_mini_pending = 0;
7808         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7809                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7810         else
7811                 ering->rx_jumbo_pending = 0;
7812
7813         ering->tx_pending = tp->tx_pending;
7814 }
7815   
7816 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7817 {
7818         struct tg3 *tp = netdev_priv(dev);
7819         int irq_sync = 0;
7820   
7821         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7822             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7823             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7824                 return -EINVAL;
7825   
7826         if (netif_running(dev)) {
7827                 tg3_netif_stop(tp);
7828                 irq_sync = 1;
7829         }
7830
7831         tg3_full_lock(tp, irq_sync);
7832   
7833         tp->rx_pending = ering->rx_pending;
7834
7835         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7836             tp->rx_pending > 63)
7837                 tp->rx_pending = 63;
7838         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7839         tp->tx_pending = ering->tx_pending;
7840
7841         if (netif_running(dev)) {
7842                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7843                 tg3_init_hw(tp);
7844                 tg3_netif_start(tp);
7845         }
7846
7847         tg3_full_unlock(tp);
7848   
7849         return 0;
7850 }
7851   
7852 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7853 {
7854         struct tg3 *tp = netdev_priv(dev);
7855   
7856         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7857         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7858         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7859 }
7860   
7861 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7862 {
7863         struct tg3 *tp = netdev_priv(dev);
7864         int irq_sync = 0;
7865   
7866         if (netif_running(dev)) {
7867                 tg3_netif_stop(tp);
7868                 irq_sync = 1;
7869         }
7870
7871         tg3_full_lock(tp, irq_sync);
7872
7873         if (epause->autoneg)
7874                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7875         else
7876                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7877         if (epause->rx_pause)
7878                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7879         else
7880                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7881         if (epause->tx_pause)
7882                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7883         else
7884                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7885
7886         if (netif_running(dev)) {
7887                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7888                 tg3_init_hw(tp);
7889                 tg3_netif_start(tp);
7890         }
7891
7892         tg3_full_unlock(tp);
7893   
7894         return 0;
7895 }
7896   
7897 static u32 tg3_get_rx_csum(struct net_device *dev)
7898 {
7899         struct tg3 *tp = netdev_priv(dev);
7900         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7901 }
7902   
7903 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7904 {
7905         struct tg3 *tp = netdev_priv(dev);
7906   
7907         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7908                 if (data != 0)
7909                         return -EINVAL;
7910                 return 0;
7911         }
7912   
7913         spin_lock_bh(&tp->lock);
7914         if (data)
7915                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7916         else
7917                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7918         spin_unlock_bh(&tp->lock);
7919   
7920         return 0;
7921 }
7922   
7923 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7924 {
7925         struct tg3 *tp = netdev_priv(dev);
7926   
7927         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7928                 if (data != 0)
7929                         return -EINVAL;
7930                 return 0;
7931         }
7932   
7933         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7934             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7935                 ethtool_op_set_tx_hw_csum(dev, data);
7936         else
7937                 ethtool_op_set_tx_csum(dev, data);
7938
7939         return 0;
7940 }
7941
7942 static int tg3_get_stats_count (struct net_device *dev)
7943 {
7944         return TG3_NUM_STATS;
7945 }
7946
7947 static int tg3_get_test_count (struct net_device *dev)
7948 {
7949         return TG3_NUM_TEST;
7950 }
7951
7952 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7953 {
7954         switch (stringset) {
7955         case ETH_SS_STATS:
7956                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7957                 break;
7958         case ETH_SS_TEST:
7959                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7960                 break;
7961         default:
7962                 WARN_ON(1);     /* we need a WARN() */
7963                 break;
7964         }
7965 }
7966
7967 static int tg3_phys_id(struct net_device *dev, u32 data)
7968 {
7969         struct tg3 *tp = netdev_priv(dev);
7970         int i;
7971
7972         if (!netif_running(tp->dev))
7973                 return -EAGAIN;
7974
7975         if (data == 0)
7976                 data = 2;
7977
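             /* Blink the LEDs for "data" seconds (two by default): alternate
              * half a second with all speed/traffic LEDs forced on and half a
              * second with only the link override set, then restore the
              * original LED control value when done or interrupted.
              */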
7978         for (i = 0; i < (data * 2); i++) {
7979                 if ((i % 2) == 0)
7980                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7981                                            LED_CTRL_1000MBPS_ON |
7982                                            LED_CTRL_100MBPS_ON |
7983                                            LED_CTRL_10MBPS_ON |
7984                                            LED_CTRL_TRAFFIC_OVERRIDE |
7985                                            LED_CTRL_TRAFFIC_BLINK |
7986                                            LED_CTRL_TRAFFIC_LED);
7987         
7988                 else
7989                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7990                                            LED_CTRL_TRAFFIC_OVERRIDE);
7991
7992                 if (msleep_interruptible(500))
7993                         break;
7994         }
7995         tw32(MAC_LED_CTRL, tp->led_ctrl);
7996         return 0;
7997 }
7998
7999 static void tg3_get_ethtool_stats (struct net_device *dev,
8000                                    struct ethtool_stats *estats, u64 *tmp_stats)
8001 {
8002         struct tg3 *tp = netdev_priv(dev);
8003         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8004 }
8005
8006 #define NVRAM_TEST_SIZE 0x100
8007 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8008
8009 static int tg3_test_nvram(struct tg3 *tp)
8010 {
8011         u32 *buf, csum, magic;
8012         int i, j, err = 0, size;
8013
8014         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8015                 return -EIO;
8016
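             /* Decide how much NVRAM to validate: standard images start with
              * TG3_EEPROM_MAGIC, while a top byte of 0xa5 marks a self-boot
              * image, of which only format 1 is checksummed here.
              */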
8017         if (magic == TG3_EEPROM_MAGIC)
8018                 size = NVRAM_TEST_SIZE;
8019         else if ((magic & 0xff000000) == 0xa5000000) {
8020                 if ((magic & 0xe00000) == 0x200000)
8021                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8022                 else
8023                         return 0;
8024         } else
8025                 return -EIO;
8026
8027         buf = kmalloc(size, GFP_KERNEL);
8028         if (buf == NULL)
8029                 return -ENOMEM;
8030
8031         err = -EIO;
8032         for (i = 0, j = 0; i < size; i += 4, j++) {
8033                 u32 val;
8034
8035                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8036                         break;
8037                 buf[j] = cpu_to_le32(val);
8038         }
8039         if (i < size)
8040                 goto out;
8041
8042         /* Selfboot format */
8043         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8044                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8045
8046                 for (i = 0; i < size; i++)
8047                         csum8 += buf8[i];
8048
8049                 if (csum8 == 0) {
8050                         err = 0;
                             goto out;
                     }
8051                 err = -EIO;
                     goto out;
8052         }
8053
8054         /* Bootstrap checksum at offset 0x10 */
8055         csum = calc_crc((unsigned char *) buf, 0x10);
8056         if (csum != cpu_to_le32(buf[0x10/4]))
8057                 goto out;
8058
8059         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8060         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8061         if (csum != cpu_to_le32(buf[0xfc/4]))
8062                 goto out;
8063
8064         err = 0;
8065
8066 out:
8067         kfree(buf);
8068         return err;
8069 }
8070
8071 #define TG3_SERDES_TIMEOUT_SEC  2
8072 #define TG3_COPPER_TIMEOUT_SEC  6
8073
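     /* Wait for the carrier to come up: up to 2 seconds on serdes devices,
      * 6 seconds on copper.
      */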
8074 static int tg3_test_link(struct tg3 *tp)
8075 {
8076         int i, max;
8077
8078         if (!netif_running(tp->dev))
8079                 return -ENODEV;
8080
8081         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8082                 max = TG3_SERDES_TIMEOUT_SEC;
8083         else
8084                 max = TG3_COPPER_TIMEOUT_SEC;
8085
8086         for (i = 0; i < max; i++) {
8087                 if (netif_carrier_ok(tp->dev))
8088                         return 0;
8089
8090                 if (msleep_interruptible(1000))
8091                         break;
8092         }
8093
8094         return -EIO;
8095 }
8096
8097 /* Only test the commonly used registers */
8098 static int tg3_test_registers(struct tg3 *tp)
8099 {
8100         int i, is_5705;
8101         u32 offset, read_mask, write_mask, val, save_val, read_val;
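             /* Each entry names a register to exercise: read_mask covers the
              * read-only bits (they must keep their value), write_mask the
              * read/write bits (they must accept both all-zeros and all-ones),
              * and the flags limit an entry to 5705-class, non-5705 or
              * non-5788 chips.
              */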
8102         static struct {
8103                 u16 offset;
8104                 u16 flags;
8105 #define TG3_FL_5705     0x1
8106 #define TG3_FL_NOT_5705 0x2
8107 #define TG3_FL_NOT_5788 0x4
8108                 u32 read_mask;
8109                 u32 write_mask;
8110         } reg_tbl[] = {
8111                 /* MAC Control Registers */
8112                 { MAC_MODE, TG3_FL_NOT_5705,
8113                         0x00000000, 0x00ef6f8c },
8114                 { MAC_MODE, TG3_FL_5705,
8115                         0x00000000, 0x01ef6b8c },
8116                 { MAC_STATUS, TG3_FL_NOT_5705,
8117                         0x03800107, 0x00000000 },
8118                 { MAC_STATUS, TG3_FL_5705,
8119                         0x03800100, 0x00000000 },
8120                 { MAC_ADDR_0_HIGH, 0x0000,
8121                         0x00000000, 0x0000ffff },
8122                 { MAC_ADDR_0_LOW, 0x0000,
8123                         0x00000000, 0xffffffff },
8124                 { MAC_RX_MTU_SIZE, 0x0000,
8125                         0x00000000, 0x0000ffff },
8126                 { MAC_TX_MODE, 0x0000,
8127                         0x00000000, 0x00000070 },
8128                 { MAC_TX_LENGTHS, 0x0000,
8129                         0x00000000, 0x00003fff },
8130                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8131                         0x00000000, 0x000007fc },
8132                 { MAC_RX_MODE, TG3_FL_5705,
8133                         0x00000000, 0x000007dc },
8134                 { MAC_HASH_REG_0, 0x0000,
8135                         0x00000000, 0xffffffff },
8136                 { MAC_HASH_REG_1, 0x0000,
8137                         0x00000000, 0xffffffff },
8138                 { MAC_HASH_REG_2, 0x0000,
8139                         0x00000000, 0xffffffff },
8140                 { MAC_HASH_REG_3, 0x0000,
8141                         0x00000000, 0xffffffff },
8142
8143                 /* Receive Data and Receive BD Initiator Control Registers. */
8144                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8145                         0x00000000, 0xffffffff },
8146                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8147                         0x00000000, 0xffffffff },
8148                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8149                         0x00000000, 0x00000003 },
8150                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8151                         0x00000000, 0xffffffff },
8152                 { RCVDBDI_STD_BD+0, 0x0000,
8153                         0x00000000, 0xffffffff },
8154                 { RCVDBDI_STD_BD+4, 0x0000,
8155                         0x00000000, 0xffffffff },
8156                 { RCVDBDI_STD_BD+8, 0x0000,
8157                         0x00000000, 0xffff0002 },
8158                 { RCVDBDI_STD_BD+0xc, 0x0000,
8159                         0x00000000, 0xffffffff },
8160         
8161                 /* Receive BD Initiator Control Registers. */
8162                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8163                         0x00000000, 0xffffffff },
8164                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8165                         0x00000000, 0x000003ff },
8166                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8167                         0x00000000, 0xffffffff },
8168         
8169                 /* Host Coalescing Control Registers. */
8170                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8171                         0x00000000, 0x00000004 },
8172                 { HOSTCC_MODE, TG3_FL_5705,
8173                         0x00000000, 0x000000f6 },
8174                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8175                         0x00000000, 0xffffffff },
8176                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8177                         0x00000000, 0x000003ff },
8178                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8179                         0x00000000, 0xffffffff },
8180                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8181                         0x00000000, 0x000003ff },
8182                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8183                         0x00000000, 0xffffffff },
8184                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8185                         0x00000000, 0x000000ff },
8186                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8187                         0x00000000, 0xffffffff },
8188                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8189                         0x00000000, 0x000000ff },
8190                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8191                         0x00000000, 0xffffffff },
8192                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8193                         0x00000000, 0xffffffff },
8194                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8195                         0x00000000, 0xffffffff },
8196                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8197                         0x00000000, 0x000000ff },
8198                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8199                         0x00000000, 0xffffffff },
8200                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8201                         0x00000000, 0x000000ff },
8202                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8203                         0x00000000, 0xffffffff },
8204                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8205                         0x00000000, 0xffffffff },
8206                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8207                         0x00000000, 0xffffffff },
8208                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8209                         0x00000000, 0xffffffff },
8210                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8211                         0x00000000, 0xffffffff },
8212                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8213                         0xffffffff, 0x00000000 },
8214                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8215                         0xffffffff, 0x00000000 },
8216
8217                 /* Buffer Manager Control Registers. */
8218                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8219                         0x00000000, 0x007fff80 },
8220                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8221                         0x00000000, 0x007fffff },
8222                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8223                         0x00000000, 0x0000003f },
8224                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8225                         0x00000000, 0x000001ff },
8226                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8227                         0x00000000, 0x000001ff },
8228                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8229                         0xffffffff, 0x00000000 },
8230                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8231                         0xffffffff, 0x00000000 },
8232         
8233                 /* Mailbox Registers */
8234                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8235                         0x00000000, 0x000001ff },
8236                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8237                         0x00000000, 0x000001ff },
8238                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8239                         0x00000000, 0x000007ff },
8240                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8241                         0x00000000, 0x000001ff },
8242
8243                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8244         };
8245
8246         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8247                 is_5705 = 1;
8248         else
8249                 is_5705 = 0;
8250
8251         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8252                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8253                         continue;
8254
8255                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8256                         continue;
8257
8258                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8259                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8260                         continue;
8261
8262                 offset = (u32) reg_tbl[i].offset;
8263                 read_mask = reg_tbl[i].read_mask;
8264                 write_mask = reg_tbl[i].write_mask;
8265
8266                 /* Save the original register content */
8267                 save_val = tr32(offset);
8268
8269                 /* Determine the read-only value. */
8270                 read_val = save_val & read_mask;
8271
8272                 /* Write zero to the register, then make sure the read-only bits
8273                  * are not changed and the read/write bits are all zeros.
8274                  */
8275                 tw32(offset, 0);
8276
8277                 val = tr32(offset);
8278
8279                 /* Test the read-only and read/write bits. */
8280                 if (((val & read_mask) != read_val) || (val & write_mask))
8281                         goto out;
8282
8283                 /* Write ones to all the bits defined by RdMask and WrMask, then
8284                  * make sure the read-only bits are not changed and the
8285                  * read/write bits are all ones.
8286                  */
8287                 tw32(offset, read_mask | write_mask);
8288
8289                 val = tr32(offset);
8290
8291                 /* Test the read-only bits. */
8292                 if ((val & read_mask) != read_val)
8293                         goto out;
8294
8295                 /* Test the read/write bits. */
8296                 if ((val & write_mask) != write_mask)
8297                         goto out;
8298
8299                 tw32(offset, save_val);
8300         }
8301
8302         return 0;
8303
8304 out:
8305         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8306         tw32(offset, save_val);
8307         return -EIO;
8308 }
8309
8310 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8311 {
8312         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8313         int i;
8314         u32 j;
8315
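             /* Write each test pattern to every 32-bit word in the region,
              * read it back through the memory window and fail on the first
              * mismatch.
              */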
8316         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8317                 for (j = 0; j < len; j += 4) {
8318                         u32 val;
8319
8320                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8321                         tg3_read_mem(tp, offset + j, &val);
8322                         if (val != test_pattern[i])
8323                                 return -EIO;
8324                 }
8325         }
8326         return 0;
8327 }
8328
8329 static int tg3_test_memory(struct tg3 *tp)
8330 {
8331         static struct mem_entry {
8332                 u32 offset;
8333                 u32 len;
8334         } mem_tbl_570x[] = {
8335                 { 0x00000000, 0x00b50},
8336                 { 0x00002000, 0x1c000},
8337                 { 0xffffffff, 0x00000}
8338         }, mem_tbl_5705[] = {
8339                 { 0x00000100, 0x0000c},
8340                 { 0x00000200, 0x00008},
8341                 { 0x00004000, 0x00800},
8342                 { 0x00006000, 0x01000},
8343                 { 0x00008000, 0x02000},
8344                 { 0x00010000, 0x0e000},
8345                 { 0xffffffff, 0x00000}
8346         }, mem_tbl_5755[] = {
8347                 { 0x00000200, 0x00008},
8348                 { 0x00004000, 0x00800},
8349                 { 0x00006000, 0x00800},
8350                 { 0x00008000, 0x02000},
8351                 { 0x00010000, 0x0c000},
8352                 { 0xffffffff, 0x00000}
8353         };
8354         struct mem_entry *mem_tbl;
8355         int err = 0;
8356         int i;
8357
8358         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8359                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8360                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8361                         mem_tbl = mem_tbl_5755;
8362                 else
8363                         mem_tbl = mem_tbl_5705;
8364         } else
8365                 mem_tbl = mem_tbl_570x;
8366
8367         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8368                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8369                     mem_tbl[i].len)) != 0)
8370                         break;
8371         }
8372         
8373         return err;
8374 }
8375
8376 #define TG3_MAC_LOOPBACK        0
8377 #define TG3_PHY_LOOPBACK        1
8378
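     /* Send a single self-addressed frame with either the MAC or the PHY in
      * internal loopback and verify that it comes back intact on the standard
      * receive ring.
      */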
8379 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8380 {
8381         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8382         u32 desc_idx;
8383         struct sk_buff *skb, *rx_skb;
8384         u8 *tx_data;
8385         dma_addr_t map;
8386         int num_pkts, tx_len, rx_len, i, err;
8387         struct tg3_rx_buffer_desc *desc;
8388
8389         if (loopback_mode == TG3_MAC_LOOPBACK) {
8390                 /* HW errata - mac loopback fails in some cases on 5780.
8391                  * Normal traffic and PHY loopback are not affected by
8392                  * errata.
8393                  */
8394                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8395                         return 0;
8396
8397                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8398                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8399                            MAC_MODE_PORT_MODE_GMII;
8400                 tw32(MAC_MODE, mac_mode);
8401         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8402                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8403                                            BMCR_SPEED1000);
8404                 udelay(40);
8405                 /* reset to prevent losing 1st rx packet intermittently */
8406                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8407                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8408                         udelay(10);
8409                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8410                 }
8411                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8412                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8413                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8414                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8415                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8416                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8417                 }
8418                 tw32(MAC_MODE, mac_mode);
8419         }
8420         else
8421                 return -EINVAL;
8422
8423         err = -EIO;
8424
8425         tx_len = 1514;
8426         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8427         tx_data = skb_put(skb, tx_len);
8428         memcpy(tx_data, tp->dev->dev_addr, 6);
8429         memset(tx_data + 6, 0x0, 8);
8430
8431         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8432
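             /* Fill the payload after the first 14 (header) bytes with an
              * incrementing byte pattern; the receive path below checks for
              * the same pattern.
              */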
8433         for (i = 14; i < tx_len; i++)
8434                 tx_data[i] = (u8) (i & 0xff);
8435
8436         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8437
8438         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8439              HOSTCC_MODE_NOW);
8440
8441         udelay(10);
8442
8443         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8444
8445         num_pkts = 0;
8446
8447         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8448
8449         tp->tx_prod++;
8450         num_pkts++;
8451
8452         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8453                      tp->tx_prod);
8454         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8455
8456         udelay(10);
8457
8458         for (i = 0; i < 10; i++) {
8459                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8460                        HOSTCC_MODE_NOW);
8461
8462                 udelay(10);
8463
8464                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8465                 rx_idx = tp->hw_status->idx[0].rx_producer;
8466                 if ((tx_idx == tp->tx_prod) &&
8467                     (rx_idx == (rx_start_idx + num_pkts)))
8468                         break;
8469         }
8470
8471         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8472         dev_kfree_skb(skb);
8473
8474         if (tx_idx != tp->tx_prod)
8475                 goto out;
8476
8477         if (rx_idx != rx_start_idx + num_pkts)
8478                 goto out;
8479
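             /* The frame came back; make sure it landed on the standard RX
              * ring, carries no error bits and matches the transmitted length
              * and payload.
              */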
8480         desc = &tp->rx_rcb[rx_start_idx];
8481         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8482         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8483         if (opaque_key != RXD_OPAQUE_RING_STD)
8484                 goto out;
8485
8486         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8487             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8488                 goto out;
8489
8490         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8491         if (rx_len != tx_len)
8492                 goto out;
8493
8494         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8495
8496         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8497         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8498
8499         for (i = 14; i < tx_len; i++) {
8500                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8501                         goto out;
8502         }
8503         err = 0;
8504         
8505         /* tg3_free_rings will unmap and free the rx_skb */
8506 out:
8507         return err;
8508 }
8509
8510 #define TG3_MAC_LOOPBACK_FAILED         1
8511 #define TG3_PHY_LOOPBACK_FAILED         2
8512 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8513                                          TG3_PHY_LOOPBACK_FAILED)
8514
8515 static int tg3_test_loopback(struct tg3 *tp)
8516 {
8517         int err = 0;
8518
8519         if (!netif_running(tp->dev))
8520                 return TG3_LOOPBACK_FAILED;
8521
8522         tg3_reset_hw(tp);
8523
8524         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8525                 err |= TG3_MAC_LOOPBACK_FAILED;
8526         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8527                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8528                         err |= TG3_PHY_LOOPBACK_FAILED;
8529         }
8530
8531         return err;
8532 }
8533
8534 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8535                           u64 *data)
8536 {
8537         struct tg3 *tp = netdev_priv(dev);
8538
8539         if (tp->link_config.phy_is_low_power)
8540                 tg3_set_power_state(tp, PCI_D0);
8541
8542         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8543
8544         if (tg3_test_nvram(tp) != 0) {
8545                 etest->flags |= ETH_TEST_FL_FAILED;
8546                 data[0] = 1;
8547         }
8548         if (tg3_test_link(tp) != 0) {
8549                 etest->flags |= ETH_TEST_FL_FAILED;
8550                 data[1] = 1;
8551         }
8552         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8553                 int err, irq_sync = 0;
8554
8555                 if (netif_running(dev)) {
8556                         tg3_netif_stop(tp);
8557                         irq_sync = 1;
8558                 }
8559
8560                 tg3_full_lock(tp, irq_sync);
8561
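                     /* The remaining tests are intrusive, so quiesce the chip
                      * first: halt it, stop the on-chip RX (and, on pre-5705
                      * parts, TX) CPU, holding the NVRAM arbitration lock
                      * across the CPU halt and releasing it afterwards if it
                      * was obtained.
                      */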
8562                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8563                 err = tg3_nvram_lock(tp);
8564                 tg3_halt_cpu(tp, RX_CPU_BASE);
8565                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8566                         tg3_halt_cpu(tp, TX_CPU_BASE);
8567                 if (!err)
8568                         tg3_nvram_unlock(tp);
8569
8570                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8571                         tg3_phy_reset(tp);
8572
8573                 if (tg3_test_registers(tp) != 0) {
8574                         etest->flags |= ETH_TEST_FL_FAILED;
8575                         data[2] = 1;
8576                 }
8577                 if (tg3_test_memory(tp) != 0) {
8578                         etest->flags |= ETH_TEST_FL_FAILED;
8579                         data[3] = 1;
8580                 }
8581                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8582                         etest->flags |= ETH_TEST_FL_FAILED;
8583
8584                 tg3_full_unlock(tp);
8585
8586                 if (tg3_test_interrupt(tp) != 0) {
8587                         etest->flags |= ETH_TEST_FL_FAILED;
8588                         data[5] = 1;
8589                 }
8590
8591                 tg3_full_lock(tp, 0);
8592
8593                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8594                 if (netif_running(dev)) {
8595                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8596                         tg3_init_hw(tp);
8597                         tg3_netif_start(tp);
8598                 }
8599
8600                 tg3_full_unlock(tp);
8601         }
8602         if (tp->link_config.phy_is_low_power)
8603                 tg3_set_power_state(tp, PCI_D3hot);
8604
8605 }
8606
8607 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8608 {
8609         struct mii_ioctl_data *data = if_mii(ifr);
8610         struct tg3 *tp = netdev_priv(dev);
8611         int err;
8612
8613         switch (cmd) {
8614         case SIOCGMIIPHY:
8615                 data->phy_id = PHY_ADDR;
8616
8617                 /* fallthru */
8618         case SIOCGMIIREG: {
8619                 u32 mii_regval;
8620
8621                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8622                         break;                  /* We have no PHY */
8623
8624                 if (tp->link_config.phy_is_low_power)
8625                         return -EAGAIN;
8626
8627                 spin_lock_bh(&tp->lock);
8628                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8629                 spin_unlock_bh(&tp->lock);
8630
8631                 data->val_out = mii_regval;
8632
8633                 return err;
8634         }
8635
8636         case SIOCSMIIREG:
8637                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8638                         break;                  /* We have no PHY */
8639
8640                 if (!capable(CAP_NET_ADMIN))
8641                         return -EPERM;
8642
8643                 if (tp->link_config.phy_is_low_power)
8644                         return -EAGAIN;
8645
8646                 spin_lock_bh(&tp->lock);
8647                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8648                 spin_unlock_bh(&tp->lock);
8649
8650                 return err;
8651
8652         default:
8653                 /* do nothing */
8654                 break;
8655         }
8656         return -EOPNOTSUPP;
8657 }
8658
8659 #if TG3_VLAN_TAG_USED
8660 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8661 {
8662         struct tg3 *tp = netdev_priv(dev);
8663
8664         tg3_full_lock(tp, 0);
8665
8666         tp->vlgrp = grp;
8667
8668         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8669         __tg3_set_rx_mode(dev);
8670
8671         tg3_full_unlock(tp);
8672 }
8673
8674 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8675 {
8676         struct tg3 *tp = netdev_priv(dev);
8677
8678         tg3_full_lock(tp, 0);
8679         if (tp->vlgrp)
8680                 tp->vlgrp->vlan_devices[vid] = NULL;
8681         tg3_full_unlock(tp);
8682 }
8683 #endif
8684
8685 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8686 {
8687         struct tg3 *tp = netdev_priv(dev);
8688
8689         memcpy(ec, &tp->coal, sizeof(*ec));
8690         return 0;
8691 }
8692
8693 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8694 {
8695         struct tg3 *tp = netdev_priv(dev);
8696         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8697         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8698
8699         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8700                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8701                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8702                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8703                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8704         }
8705
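             /* Reject values the hardware cannot take.  On 5705 and newer
              * chips the per-IRQ tick limits and the statistics tick limits
              * above stay zero, so non-zero requests for those fields fail
              * here.
              */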
8706         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8707             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8708             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8709             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8710             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8711             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8712             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8713             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8714             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8715             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8716                 return -EINVAL;
8717
8718         /* No rx interrupts will be generated if both are zero */
8719         if ((ec->rx_coalesce_usecs == 0) &&
8720             (ec->rx_max_coalesced_frames == 0))
8721                 return -EINVAL;
8722
8723         /* No tx interrupts will be generated if both are zero */
8724         if ((ec->tx_coalesce_usecs == 0) &&
8725             (ec->tx_max_coalesced_frames == 0))
8726                 return -EINVAL;
8727
8728         /* Only copy relevant parameters, ignore all others. */
8729         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8730         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8731         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8732         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8733         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8734         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8735         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8736         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8737         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8738
8739         if (netif_running(dev)) {
8740                 tg3_full_lock(tp, 0);
8741                 __tg3_set_coalesce(tp, &tp->coal);
8742                 tg3_full_unlock(tp);
8743         }
8744         return 0;
8745 }
8746
8747 static struct ethtool_ops tg3_ethtool_ops = {
8748         .get_settings           = tg3_get_settings,
8749         .set_settings           = tg3_set_settings,
8750         .get_drvinfo            = tg3_get_drvinfo,
8751         .get_regs_len           = tg3_get_regs_len,
8752         .get_regs               = tg3_get_regs,
8753         .get_wol                = tg3_get_wol,
8754         .set_wol                = tg3_set_wol,
8755         .get_msglevel           = tg3_get_msglevel,
8756         .set_msglevel           = tg3_set_msglevel,
8757         .nway_reset             = tg3_nway_reset,
8758         .get_link               = ethtool_op_get_link,
8759         .get_eeprom_len         = tg3_get_eeprom_len,
8760         .get_eeprom             = tg3_get_eeprom,
8761         .set_eeprom             = tg3_set_eeprom,
8762         .get_ringparam          = tg3_get_ringparam,
8763         .set_ringparam          = tg3_set_ringparam,
8764         .get_pauseparam         = tg3_get_pauseparam,
8765         .set_pauseparam         = tg3_set_pauseparam,
8766         .get_rx_csum            = tg3_get_rx_csum,
8767         .set_rx_csum            = tg3_set_rx_csum,
8768         .get_tx_csum            = ethtool_op_get_tx_csum,
8769         .set_tx_csum            = tg3_set_tx_csum,
8770         .get_sg                 = ethtool_op_get_sg,
8771         .set_sg                 = ethtool_op_set_sg,
8772 #if TG3_TSO_SUPPORT != 0
8773         .get_tso                = ethtool_op_get_tso,
8774         .set_tso                = tg3_set_tso,
8775 #endif
8776         .self_test_count        = tg3_get_test_count,
8777         .self_test              = tg3_self_test,
8778         .get_strings            = tg3_get_strings,
8779         .phys_id                = tg3_phys_id,
8780         .get_stats_count        = tg3_get_stats_count,
8781         .get_ethtool_stats      = tg3_get_ethtool_stats,
8782         .get_coalesce           = tg3_get_coalesce,
8783         .set_coalesce           = tg3_set_coalesce,
8784         .get_perm_addr          = ethtool_op_get_perm_addr,
8785 };
8786
8787 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8788 {
8789         u32 cursize, val, magic;
8790
8791         tp->nvram_size = EEPROM_CHIP_SIZE;
8792
8793         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8794                 return;
8795
8796         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8797                 return;
8798
8799         /*
8800          * Size the chip by reading offsets at increasing powers of two.
8801          * When we encounter our validation signature, we know the addressing
8802          * has wrapped around, and thus have our chip size.
8803          */
8804         cursize = 0x10;
8805
8806         while (cursize < tp->nvram_size) {
8807                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8808                         return;
8809
8810                 if (val == magic)
8811                         break;
8812
8813                 cursize <<= 1;
8814         }
8815
8816         tp->nvram_size = cursize;
8817 }
8818                 
8819 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8820 {
8821         u32 val;
8822
8823         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8824                 return;
8825
8826         /* Selfboot format */
8827         if (val != TG3_EEPROM_MAGIC) {
8828                 tg3_get_eeprom_size(tp);
8829                 return;
8830         }
8831
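             /* Standard images store the NVRAM size, in kilobytes, in the
              * upper 16 bits of the word at offset 0xf0; fall back to 128KB
              * when that field is zero or cannot be read.
              */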
8832         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8833                 if (val != 0) {
8834                         tp->nvram_size = (val >> 16) * 1024;
8835                         return;
8836                 }
8837         }
8838         tp->nvram_size = 0x20000;
8839 }
8840
8841 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8842 {
8843         u32 nvcfg1;
8844
8845         nvcfg1 = tr32(NVRAM_CFG1);
8846         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8847                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8848         }
8849         else {
8850                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8851                 tw32(NVRAM_CFG1, nvcfg1);
8852         }
8853
8854         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8855             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8856                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8857                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8858                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8859                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8860                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8861                                 break;
8862                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8863                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8864                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8865                                 break;
8866                         case FLASH_VENDOR_ATMEL_EEPROM:
8867                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8868                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8869                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8870                                 break;
8871                         case FLASH_VENDOR_ST:
8872                                 tp->nvram_jedecnum = JEDEC_ST;
8873                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8874                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8875                                 break;
8876                         case FLASH_VENDOR_SAIFUN:
8877                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8878                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8879                                 break;
8880                         case FLASH_VENDOR_SST_SMALL:
8881                         case FLASH_VENDOR_SST_LARGE:
8882                                 tp->nvram_jedecnum = JEDEC_SST;
8883                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8884                                 break;
8885                 }
8886         }
8887         else {
8888                 tp->nvram_jedecnum = JEDEC_ATMEL;
8889                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8890                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8891         }
8892 }
8893
8894 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8895 {
8896         u32 nvcfg1;
8897
8898         nvcfg1 = tr32(NVRAM_CFG1);
8899
8900         /* NVRAM protection for TPM */
8901         if (nvcfg1 & (1 << 27))
8902                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8903
8904         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8905                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8906                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8907                         tp->nvram_jedecnum = JEDEC_ATMEL;
8908                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8909                         break;
8910                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8911                         tp->nvram_jedecnum = JEDEC_ATMEL;
8912                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8913                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8914                         break;
8915                 case FLASH_5752VENDOR_ST_M45PE10:
8916                 case FLASH_5752VENDOR_ST_M45PE20:
8917                 case FLASH_5752VENDOR_ST_M45PE40:
8918                         tp->nvram_jedecnum = JEDEC_ST;
8919                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8920                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8921                         break;
8922         }
8923
8924         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8925                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8926                         case FLASH_5752PAGE_SIZE_256:
8927                                 tp->nvram_pagesize = 256;
8928                                 break;
8929                         case FLASH_5752PAGE_SIZE_512:
8930                                 tp->nvram_pagesize = 512;
8931                                 break;
8932                         case FLASH_5752PAGE_SIZE_1K:
8933                                 tp->nvram_pagesize = 1024;
8934                                 break;
8935                         case FLASH_5752PAGE_SIZE_2K:
8936                                 tp->nvram_pagesize = 2048;
8937                                 break;
8938                         case FLASH_5752PAGE_SIZE_4K:
8939                                 tp->nvram_pagesize = 4096;
8940                                 break;
8941                         case FLASH_5752PAGE_SIZE_264:
8942                                 tp->nvram_pagesize = 264;
8943                                 break;
8944                 }
8945         }
8946         else {
8947                 /* For eeprom, set pagesize to maximum eeprom size */
8948                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8949
8950                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8951                 tw32(NVRAM_CFG1, nvcfg1);
8952         }
8953 }
8954
8955 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
8956 {
8957         u32 nvcfg1;
8958
8959         nvcfg1 = tr32(NVRAM_CFG1);
8960
8961         /* NVRAM protection for TPM */
8962         if (nvcfg1 & (1 << 27))
8963                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8964
8965         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8966                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
8967                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
8968                         tp->nvram_jedecnum = JEDEC_ATMEL;
8969                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8970                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8971
8972                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8973                         tw32(NVRAM_CFG1, nvcfg1);
8974                         break;
8975                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8976                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8977                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8978                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8979                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
8980                         tp->nvram_jedecnum = JEDEC_ATMEL;
8981                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8982                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8983                         tp->nvram_pagesize = 264;
8984                         break;
8985                 case FLASH_5752VENDOR_ST_M45PE10:
8986                 case FLASH_5752VENDOR_ST_M45PE20:
8987                 case FLASH_5752VENDOR_ST_M45PE40:
8988                         tp->nvram_jedecnum = JEDEC_ST;
8989                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8990                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8991                         tp->nvram_pagesize = 256;
8992                         break;
8993         }
8994 }
8995
8996 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8997 {
8998         u32 nvcfg1;
8999
9000         nvcfg1 = tr32(NVRAM_CFG1);
9001
9002         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9003                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9004                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9005                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9006                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9007                         tp->nvram_jedecnum = JEDEC_ATMEL;
9008                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9009                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9010
9011                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9012                         tw32(NVRAM_CFG1, nvcfg1);
9013                         break;
9014                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9015                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9016                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9017                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9018                         tp->nvram_jedecnum = JEDEC_ATMEL;
9019                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9020                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9021                         tp->nvram_pagesize = 264;
9022                         break;
9023                 case FLASH_5752VENDOR_ST_M45PE10:
9024                 case FLASH_5752VENDOR_ST_M45PE20:
9025                 case FLASH_5752VENDOR_ST_M45PE40:
9026                         tp->nvram_jedecnum = JEDEC_ST;
9027                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9028                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9029                         tp->nvram_pagesize = 256;
9030                         break;
9031         }
9032 }
9033
9034 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9035 static void __devinit tg3_nvram_init(struct tg3 *tp)
9036 {
9037         int j;
9038
9039         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9040                 return;
9041
9042         tw32_f(GRC_EEPROM_ADDR,
9043              (EEPROM_ADDR_FSM_RESET |
9044               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9045                EEPROM_ADDR_CLKPERD_SHIFT)));
9046
9047         /* XXX schedule_timeout() ... */
9048         for (j = 0; j < 100; j++)
9049                 udelay(10);
9050
9051         /* Enable seeprom accesses. */
9052         tw32_f(GRC_LOCAL_CTRL,
9053              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9054         udelay(100);
9055
9056         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9057             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9058                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9059
9060                 if (tg3_nvram_lock(tp)) {
9061                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9062                                "tg3_nvram_init failed.\n", tp->dev->name);
9063                         return;
9064                 }
9065                 tg3_enable_nvram_access(tp);
9066
9067                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9068                         tg3_get_5752_nvram_info(tp);
9069                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9070                         tg3_get_5755_nvram_info(tp);
9071                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9072                         tg3_get_5787_nvram_info(tp);
9073                 else
9074                         tg3_get_nvram_info(tp);
9075
9076                 tg3_get_nvram_size(tp);
9077
9078                 tg3_disable_nvram_access(tp);
9079                 tg3_nvram_unlock(tp);
9080
9081         } else {
9082                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9083
9084                 tg3_get_eeprom_size(tp);
9085         }
9086 }
9087
9088 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9089                                         u32 offset, u32 *val)
9090 {
9091         u32 tmp;
9092         int i;
9093
9094         if (offset > EEPROM_ADDR_ADDR_MASK ||
9095             (offset % 4) != 0)
9096                 return -EINVAL;
9097
9098         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9099                                         EEPROM_ADDR_DEVID_MASK |
9100                                         EEPROM_ADDR_READ);
9101         tw32(GRC_EEPROM_ADDR,
9102              tmp |
9103              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9104              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9105               EEPROM_ADDR_ADDR_MASK) |
9106              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9107
9108         for (i = 0; i < 10000; i++) {
9109                 tmp = tr32(GRC_EEPROM_ADDR);
9110
9111                 if (tmp & EEPROM_ADDR_COMPLETE)
9112                         break;
9113                 udelay(100);
9114         }
9115         if (!(tmp & EEPROM_ADDR_COMPLETE))
9116                 return -EBUSY;
9117
9118         *val = tr32(GRC_EEPROM_DATA);
9119         return 0;
9120 }
9121
9122 #define NVRAM_CMD_TIMEOUT 10000
9123
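/* Each NVRAM command is polled for completion in 10us steps, so the
 * NVRAM_CMD_TIMEOUT of 10000 gives a budget of roughly 100ms per
 * command before we give up with -EBUSY.
 */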
9124 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9125 {
9126         int i;
9127
9128         tw32(NVRAM_CMD, nvram_cmd);
9129         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9130                 udelay(10);
9131                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9132                         udelay(10);
9133                         break;
9134                 }
9135         }
9136         if (i == NVRAM_CMD_TIMEOUT) {
9137                 return -EBUSY;
9138         }
9139         return 0;
9140 }
9141
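/* Buffered Atmel flash (AT45DB011B style) is organized in 264-byte
 * pages, with the page number packed above ATMEL_AT45DB0X1B_PAGE_POS
 * and the byte offset within the page in the low bits.  For example,
 * assuming a page-position shift of 9: logical offset 1000 is page 3
 * (1000 / 264), byte 208 (1000 % 264), i.e. physical (3 << 9) + 208.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */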
9142 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9143 {
9144         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9145             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9146             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9147             (tp->nvram_jedecnum == JEDEC_ATMEL))
9148
9149                 addr = ((addr / tp->nvram_pagesize) <<
9150                         ATMEL_AT45DB0X1B_PAGE_POS) +
9151                        (addr % tp->nvram_pagesize);
9152
9153         return addr;
9154 }
9155
9156 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9157 {
9158         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9159             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9160             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9161             (tp->nvram_jedecnum == JEDEC_ATMEL))
9162
9163                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9164                         tp->nvram_pagesize) +
9165                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9166
9167         return addr;
9168 }
9169
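/* NVRAM word read: fall back to the legacy seeprom path when the
 * TG3_FLAG_NVRAM interface is absent; otherwise translate the offset
 * for paged Atmel parts, take the NVRAM lock, issue a single RD
 * command and byte-swap the result from NVRAM_RDDATA.
 */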
9170 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9171 {
9172         int ret;
9173
9174         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9175                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9176                 return -EINVAL;
9177         }
9178
9179         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9180                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9181
9182         offset = tg3_nvram_phys_addr(tp, offset);
9183
9184         if (offset > NVRAM_ADDR_MSK)
9185                 return -EINVAL;
9186
9187         ret = tg3_nvram_lock(tp);
9188         if (ret)
9189                 return ret;
9190
9191         tg3_enable_nvram_access(tp);
9192
9193         tw32(NVRAM_ADDR, offset);
9194         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9195                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9196
9197         if (ret == 0)
9198                 *val = swab32(tr32(NVRAM_RDDATA));
9199
9200         tg3_disable_nvram_access(tp);
9201
9202         tg3_nvram_unlock(tp);
9203
9204         return ret;
9205 }
9206
9207 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9208 {
9209         int err;
9210         u32 tmp;
9211
9212         err = tg3_nvram_read(tp, offset, &tmp);
9213         *val = swab32(tmp);
9214         return err;
9215 }
9216
9217 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9218                                     u32 offset, u32 len, u8 *buf)
9219 {
9220         int i, j, rc = 0;
9221         u32 val;
9222
9223         for (i = 0; i < len; i += 4) {
9224                 u32 addr, data;
9225
9226                 addr = offset + i;
9227
9228                 memcpy(&data, buf + i, 4);
9229
9230                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9231
9232                 val = tr32(GRC_EEPROM_ADDR);
9233                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9234
9235                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9236                         EEPROM_ADDR_READ);
9237                 tw32(GRC_EEPROM_ADDR, val |
9238                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9239                         (addr & EEPROM_ADDR_ADDR_MASK) |
9240                         EEPROM_ADDR_START |
9241                         EEPROM_ADDR_WRITE);
9242
9243                 for (j = 0; j < 10000; j++) {
9244                         val = tr32(GRC_EEPROM_ADDR);
9245
9246                         if (val & EEPROM_ADDR_COMPLETE)
9247                                 break;
9248                         udelay(100);
9249                 }
9250                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9251                         rc = -EBUSY;
9252                         break;
9253                 }
9254         }
9255
9256         return rc;
9257 }
9258
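/* Unbuffered flash write: a read-modify-write of each affected page.
 * The current page contents are read back into a scratch buffer, the
 * new data is merged in, then the sequence is write-enable, page
 * erase, write-enable again, and a word-by-word program pass with
 * NVRAM_CMD_FIRST on the first word and NVRAM_CMD_LAST on the final
 * word.  A write-disable command is issued at the end regardless of
 * the outcome.
 */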
9259 /* offset and length are dword aligned */
9260 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9261                 u8 *buf)
9262 {
9263         int ret = 0;
9264         u32 pagesize = tp->nvram_pagesize;
9265         u32 pagemask = pagesize - 1;
9266         u32 nvram_cmd;
9267         u8 *tmp;
9268
9269         tmp = kmalloc(pagesize, GFP_KERNEL);
9270         if (tmp == NULL)
9271                 return -ENOMEM;
9272
9273         while (len) {
9274                 int j;
9275                 u32 phy_addr, page_off, size;
9276
9277                 phy_addr = offset & ~pagemask;
9278
9279                 for (j = 0; j < pagesize; j += 4) {
9280                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9281                                                 (u32 *) (tmp + j))))
9282                                 break;
9283                 }
9284                 if (ret)
9285                         break;
9286
9287                 page_off = offset & pagemask;
9288                 size = pagesize;
9289                 if (len < size)
9290                         size = len;
9291
9292                 len -= size;
9293
9294                 memcpy(tmp + page_off, buf, size);
9295
9296                 offset = offset + (pagesize - page_off);
9297
9298                 tg3_enable_nvram_access(tp);
9299
9300                 /*
9301                  * Before we can erase the flash page, we need
9302                  * to issue a special "write enable" command.
9303                  */
9304                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9305
9306                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9307                         break;
9308
9309                 /* Erase the target page */
9310                 tw32(NVRAM_ADDR, phy_addr);
9311
9312                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9313                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9314
9315                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9316                         break;
9317
9318                 /* Issue another write enable to start the write. */
9319                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9320
9321                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9322                         break;
9323
9324                 for (j = 0; j < pagesize; j += 4) {
9325                         u32 data;
9326
9327                         data = *((u32 *) (tmp + j));
9328                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9329
9330                         tw32(NVRAM_ADDR, phy_addr + j);
9331
9332                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9333                                 NVRAM_CMD_WR;
9334
9335                         if (j == 0)
9336                                 nvram_cmd |= NVRAM_CMD_FIRST;
9337                         else if (j == (pagesize - 4))
9338                                 nvram_cmd |= NVRAM_CMD_LAST;
9339
9340                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9341                                 break;
9342                 }
9343                 if (ret)
9344                         break;
9345         }
9346
9347         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9348         tg3_nvram_exec_cmd(tp, nvram_cmd);
9349
9350         kfree(tmp);
9351
9352         return ret;
9353 }
9354
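/* Buffered flash / eeprom write: words can be programmed directly.
 * NVRAM_CMD_FIRST is set at the start of a page (or of the transfer)
 * and NVRAM_CMD_LAST at the end of a page or of the buffer; ST flash
 * on chips other than the 5752/5755/5787 additionally needs an
 * explicit write-enable before the first word of each page.
 */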
9355 /* offset and length are dword aligned */
9356 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9357                 u8 *buf)
9358 {
9359         int i, ret = 0;
9360
9361         for (i = 0; i < len; i += 4, offset += 4) {
9362                 u32 data, page_off, phy_addr, nvram_cmd;
9363
9364                 memcpy(&data, buf + i, 4);
9365                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9366
9367                 page_off = offset % tp->nvram_pagesize;
9368
9369                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9370
9371                 tw32(NVRAM_ADDR, phy_addr);
9372
9373                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9374
9375                 if ((page_off == 0) || (i == 0))
9376                         nvram_cmd |= NVRAM_CMD_FIRST;
9377                 else if (page_off == (tp->nvram_pagesize - 4))
9378                         nvram_cmd |= NVRAM_CMD_LAST;
9379
9380                 if (i == (len - 4))
9381                         nvram_cmd |= NVRAM_CMD_LAST;
9382
9383                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9384                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9385                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9386                     (tp->nvram_jedecnum == JEDEC_ST) &&
9387                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9388
9389                         if ((ret = tg3_nvram_exec_cmd(tp,
9390                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9391                                 NVRAM_CMD_DONE)))
9392
9393                                 break;
9394                 }
9395                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9396                         /* We always do complete word writes to eeprom. */
9397                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9398                 }
9399
9400                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9401                         break;
9402         }
9403         return ret;
9404 }
9405
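/* Top-level NVRAM write entry point: temporarily drops GPIO1 when it
 * is used as an eeprom write-protect line, then dispatches to the
 * legacy seeprom path or, with the NVRAM write-enable bit set in
 * GRC_MODE, to the buffered or unbuffered flash routines above.
 */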
9406 /* offset and length are dword aligned */
9407 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9408 {
9409         int ret;
9410
9411         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9412                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9413                 return -EINVAL;
9414         }
9415
9416         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9417                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9418                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9419                 udelay(40);
9420         }
9421
9422         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9423                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9424         }
9425         else {
9426                 u32 grc_mode;
9427
9428                 ret = tg3_nvram_lock(tp);
9429                 if (ret)
9430                         return ret;
9431
9432                 tg3_enable_nvram_access(tp);
9433                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9434                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9435                         tw32(NVRAM_WRITE1, 0x406);
9436
9437                 grc_mode = tr32(GRC_MODE);
9438                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9439
9440                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9441                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9442
9443                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9444                                 buf);
9445                 }
9446                 else {
9447                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9448                                 buf);
9449                 }
9450
9451                 grc_mode = tr32(GRC_MODE);
9452                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9453
9454                 tg3_disable_nvram_access(tp);
9455                 tg3_nvram_unlock(tp);
9456         }
9457
9458         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9459                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9460                 udelay(40);
9461         }
9462
9463         return ret;
9464 }
9465
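/* Fallback mapping from PCI subsystem IDs to PHY IDs, used by
 * tg3_phy_probe() when neither the MII registers nor the NVRAM yield
 * a usable PHY ID.  A phy_id of 0 marks fiber/serdes boards.
 */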
9466 struct subsys_tbl_ent {
9467         u16 subsys_vendor, subsys_devid;
9468         u32 phy_id;
9469 };
9470
9471 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9472         /* Broadcom boards. */
9473         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9474         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9475         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9476         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9477         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9478         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9479         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9480         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9481         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9482         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9483         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9484
9485         /* 3com boards. */
9486         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9487         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9488         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9489         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9490         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9491
9492         /* DELL boards. */
9493         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9494         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9495         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9496         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9497
9498         /* Compaq boards. */
9499         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9500         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9501         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9502         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9503         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9504
9505         /* IBM boards. */
9506         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9507 };
9508
9509 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9510 {
9511         int i;
9512
9513         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9514                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9515                      tp->pdev->subsystem_vendor) &&
9516                     (subsys_id_to_phy_id[i].subsys_devid ==
9517                      tp->pdev->subsystem_device))
9518                         return &subsys_id_to_phy_id[i];
9519         }
9520         return NULL;
9521 }
9522
9523 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9524 {
9525         u32 val;
9526         u16 pmcsr;
9527
9528         /* On some early chips the SRAM cannot be accessed in D3hot state,
9529          * so we need to make sure we're in D0.
9530          */
9531         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9532         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9533         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9534         msleep(1);
9535
9536         /* Make sure register accesses (indirect or otherwise)
9537          * will function correctly.
9538          */
9539         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9540                                tp->misc_host_ctrl);
9541
9542         tp->phy_id = PHY_ID_INVALID;
9543         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9544
9545         /* Do not even try poking around in here on Sun parts.  */
9546         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9547                 return;
9548
9549         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9550         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9551                 u32 nic_cfg, led_cfg;
9552                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9553                 int eeprom_phy_serdes = 0;
9554
9555                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9556                 tp->nic_sram_data_cfg = nic_cfg;
9557
9558                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9559                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9560                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9561                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9562                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9563                     (ver > 0) && (ver < 0x100))
9564                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9565
9566                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9567                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9568                         eeprom_phy_serdes = 1;
9569
9570                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9571                 if (nic_phy_id != 0) {
9572                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9573                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9574
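                        /* Repack the two NVRAM PHY ID words into the
                         * driver's internal PHY_ID format, the same
                         * layout tg3_phy_probe() builds from
                         * MII_PHYSID1/MII_PHYSID2.
                         */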
9575                         eeprom_phy_id  = (id1 >> 16) << 10;
9576                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9577                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9578                 } else
9579                         eeprom_phy_id = 0;
9580
9581                 tp->phy_id = eeprom_phy_id;
9582                 if (eeprom_phy_serdes) {
9583                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9584                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9585                         else
9586                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9587                 }
9588
9589                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9590                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9591                                     SHASTA_EXT_LED_MODE_MASK);
9592                 else
9593                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9594
9595                 switch (led_cfg) {
9596                 default:
9597                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9598                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9599                         break;
9600
9601                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9602                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9603                         break;
9604
9605                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9606                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9607
9608                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9609                          * read from some older 5700/5701 bootcode.
9610                          */
9611                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9612                             ASIC_REV_5700 ||
9613                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9614                             ASIC_REV_5701)
9615                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9616
9617                         break;
9618
9619                 case SHASTA_EXT_LED_SHARED:
9620                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9621                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9622                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9623                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9624                                                  LED_CTRL_MODE_PHY_2);
9625                         break;
9626
9627                 case SHASTA_EXT_LED_MAC:
9628                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9629                         break;
9630
9631                 case SHASTA_EXT_LED_COMBO:
9632                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9633                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9634                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9635                                                  LED_CTRL_MODE_PHY_2);
9636                         break;
9637
9638                 }
9639
9640                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9641                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9642                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9643                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9644
9645                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9646                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9647                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9648                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9649
9650                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9651                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9652                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9653                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9654                 }
9655                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9656                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9657
9658                 if (cfg2 & (1 << 17))
9659                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9660
9661                 /* SerDes signal pre-emphasis in register 0x590 is set
9662                  * by the bootcode if bit 18 is set. */
9663                 if (cfg2 & (1 << 18))
9664                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9665         }
9666 }
9667
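/* PHY probe order: with ASF firmware active the MII ID registers are
 * left alone and the NVRAM-derived ID is trusted; otherwise the ID is
 * read from MII_PHYSID1/2, and if that does not look sane we fall back
 * first to the NVRAM value and finally to the subsystem ID table.
 */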
9668 static int __devinit tg3_phy_probe(struct tg3 *tp)
9669 {
9670         u32 hw_phy_id_1, hw_phy_id_2;
9671         u32 hw_phy_id, hw_phy_id_masked;
9672         int err;
9673
9674         /* Reading the PHY ID register can conflict with ASF
9675          * firmware access to the PHY hardware.
9676          */
9677         err = 0;
9678         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9679                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9680         } else {
9681                 /* Now read the physical PHY_ID from the chip and verify
9682                  * that it is sane.  If it doesn't look good, we fall back
9683                  * to either the hard-coded table based PHY_ID and failing
9684                  * that the value found in the eeprom area.
9685                  */
9686                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9687                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9688
9689                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9690                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9691                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9692
9693                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9694         }
9695
9696         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9697                 tp->phy_id = hw_phy_id;
9698                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9699                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9700                 else
9701                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9702         } else {
9703                 if (tp->phy_id != PHY_ID_INVALID) {
9704                         /* Do nothing, phy ID already set up in
9705                          * tg3_get_eeprom_hw_cfg().
9706                          */
9707                 } else {
9708                         struct subsys_tbl_ent *p;
9709
9710                         /* No eeprom signature?  Try the hardcoded
9711                          * subsys device table.
9712                          */
9713                         p = lookup_by_subsys(tp);
9714                         if (!p)
9715                                 return -ENODEV;
9716
9717                         tp->phy_id = p->phy_id;
9718                         if (!tp->phy_id ||
9719                             tp->phy_id == PHY_ID_BCM8002)
9720                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9721                 }
9722         }
9723
9724         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9725             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9726                 u32 bmsr, adv_reg, tg3_ctrl;
9727
9728                 tg3_readphy(tp, MII_BMSR, &bmsr);
9729                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9730                     (bmsr & BMSR_LSTATUS))
9731                         goto skip_phy_reset;
9732
9733                 err = tg3_phy_reset(tp);
9734                 if (err)
9735                         return err;
9736
9737                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9738                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9739                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9740                 tg3_ctrl = 0;
9741                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9742                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9743                                     MII_TG3_CTRL_ADV_1000_FULL);
9744                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9745                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9746                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9747                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9748                 }
9749
9750                 if (!tg3_copper_is_advertising_all(tp)) {
9751                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9752
9753                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9754                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9755
9756                         tg3_writephy(tp, MII_BMCR,
9757                                      BMCR_ANENABLE | BMCR_ANRESTART);
9758                 }
9759                 tg3_phy_set_wirespeed(tp);
9760
9761                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9762                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9763                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9764         }
9765
9766 skip_phy_reset:
9767         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9768                 err = tg3_init_5401phy_dsp(tp);
9769                 if (err)
9770                         return err;
9771         }
9772
9773         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9774                 err = tg3_init_5401phy_dsp(tp);
9775         }
9776
9777         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9778                 tp->link_config.advertising =
9779                         (ADVERTISED_1000baseT_Half |
9780                          ADVERTISED_1000baseT_Full |
9781                          ADVERTISED_Autoneg |
9782                          ADVERTISED_FIBRE);
9783         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9784                 tp->link_config.advertising &=
9785                         ~(ADVERTISED_1000baseT_Half |
9786                           ADVERTISED_1000baseT_Full);
9787
9788         return err;
9789 }
9790
9791 static void __devinit tg3_read_partno(struct tg3 *tp)
9792 {
9793         unsigned char vpd_data[256];
9794         int i;
9795         u32 magic;
9796
9797         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9798                 /* Sun decided not to put the necessary bits in the
9799                  * NVRAM of their onboard tg3 parts :(
9800                  */
9801                 strcpy(tp->board_part_number, "Sun 570X");
9802                 return;
9803         }
9804
9805         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9806                 return;
9807
9808         if (magic == TG3_EEPROM_MAGIC) {
9809                 for (i = 0; i < 256; i += 4) {
9810                         u32 tmp;
9811
9812                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9813                                 goto out_not_found;
9814
9815                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9816                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9817                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9818                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9819                 }
9820         } else {
9821                 int vpd_cap;
9822
9823                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9824                 for (i = 0; i < 256; i += 4) {
9825                         u32 tmp, j = 0;
9826                         u16 tmp16;
9827
9828                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9829                                               i);
9830                         while (j++ < 100) {
9831                                 pci_read_config_word(tp->pdev, vpd_cap +
9832                                                      PCI_VPD_ADDR, &tmp16);
9833                                 if (tmp16 & 0x8000)
9834                                         break;
9835                                 msleep(1);
9836                         }
9837                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9838                                               &tmp);
9839                         tmp = cpu_to_le32(tmp);
9840                         memcpy(&vpd_data[i], &tmp, 4);
9841                 }
9842         }
9843
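/* The VPD image is a sequence of tagged resources: 0x82 is the
 * identifier string, 0x90 the read-only (VPD-R) section and 0x91 the
 * read-write section, each followed by a little-endian 16-bit length.
 * Within VPD-R, entries are a two-character keyword plus a length
 * byte; the "PN" keyword carries the board part number we want.
 */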
9844         /* Now parse and find the part number. */
9845         for (i = 0; i < 256; ) {
9846                 unsigned char val = vpd_data[i];
9847                 int block_end;
9848
9849                 if (val == 0x82 || val == 0x91) {
9850                         i = (i + 3 +
9851                              (vpd_data[i + 1] +
9852                               (vpd_data[i + 2] << 8)));
9853                         continue;
9854                 }
9855
9856                 if (val != 0x90)
9857                         goto out_not_found;
9858
9859                 block_end = (i + 3 +
9860                              (vpd_data[i + 1] +
9861                               (vpd_data[i + 2] << 8)));
9862                 i += 3;
9863                 while (i < block_end) {
9864                         if (vpd_data[i + 0] == 'P' &&
9865                             vpd_data[i + 1] == 'N') {
9866                                 int partno_len = vpd_data[i + 2];
9867
9868                                 if (partno_len > 24)
9869                                         goto out_not_found;
9870
9871                                 memcpy(tp->board_part_number,
9872                                        &vpd_data[i + 3],
9873                                        partno_len);
9874
9875                                 /* Success. */
9876                                 return;
9877                         }
                        /* Not a "PN" keyword: skip past this entry (2-byte
                         * keyword + length byte + data); without this the
                         * loop never terminates when no part number exists.
                         */
                        i += 3 + vpd_data[i + 2];
9878                 }
9879
9880                 /* Part number not found. */
9881                 goto out_not_found;
9882         }
9883
9884 out_not_found:
9885         strcpy(tp->board_part_number, "none");
9886 }
9887
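/* Pull the bootcode version string out of NVRAM: the words at offsets
 * 0x4 and 0xc appear to point at the firmware image, and if its header
 * looks like bootcode (top bits 0x0c000000) the 16-byte version string
 * is copied into tp->fw_ver.
 */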
9888 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9889 {
9890         u32 val, offset, start;
9891
9892         if (tg3_nvram_read_swab(tp, 0, &val))
9893                 return;
9894
9895         if (val != TG3_EEPROM_MAGIC)
9896                 return;
9897
9898         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9899             tg3_nvram_read_swab(tp, 0x4, &start))
9900                 return;
9901
9902         offset = tg3_nvram_logical_addr(tp, offset);
9903         if (tg3_nvram_read_swab(tp, offset, &val))
9904                 return;
9905
9906         if ((val & 0xfc000000) == 0x0c000000) {
9907                 u32 ver_offset, addr;
9908                 int i;
9909
9910                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9911                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9912                         return;
9913
9914                 if (val != 0)
9915                         return;
9916
9917                 addr = offset + ver_offset - start;
9918                 for (i = 0; i < 16; i += 4) {
9919                         if (tg3_nvram_read(tp, addr + i, &val))
9920                                 return;
9921
9922                         val = cpu_to_le32(val);
9923                         memcpy(tp->fw_ver + i, &val, 4);
9924                 }
9925         }
9926 }
9927
9928 #ifdef CONFIG_SPARC64
9929 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9930 {
9931         struct pci_dev *pdev = tp->pdev;
9932         struct pcidev_cookie *pcp = pdev->sysdata;
9933
9934         if (pcp != NULL) {
9935                 int node = pcp->prom_node;
9936                 u32 venid;
9937                 int err;
9938
9939                 err = prom_getproperty(node, "subsystem-vendor-id",
9940                                        (char *) &venid, sizeof(venid));
9941                 if (err == 0 || err == -1)
9942                         return 0;
9943                 if (venid == PCI_VENDOR_ID_SUN)
9944                         return 1;
9945
9946                 /* TG3 chips onboard the SunBlade-2500 don't have the
9947                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9948                  * are distinguishable from non-Sun variants by being
9949                  * named "network" by the firmware.  Non-Sun cards will
9950                  * show up as being named "ethernet".
9951                  */
9952                 if (!strcmp(pcp->prom_name, "network"))
9953                         return 1;
9954         }
9955         return 0;
9956 }
9957 #endif
9958
9959 static int __devinit tg3_get_invariants(struct tg3 *tp)
9960 {
9961         static struct pci_device_id write_reorder_chipsets[] = {
9962                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9963                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9964                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9965                              PCI_DEVICE_ID_VIA_8385_0) },
9966                 { },
9967         };
9968         u32 misc_ctrl_reg;
9969         u32 cacheline_sz_reg;
9970         u32 pci_state_reg, grc_misc_cfg;
9971         u32 val;
9972         u16 pci_cmd;
9973         int err;
9974
9975 #ifdef CONFIG_SPARC64
9976         if (tg3_is_sun_570X(tp))
9977                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9978 #endif
9979
9980         /* Force memory write invalidate off.  If we leave it on,
9981          * then on 5700_BX chips we have to enable a workaround.
9982          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9983          * to match the cacheline size.  The Broadcom driver has this
9984          * workaround but turns MWI off at all times, so it never uses
9985          * it.  This seems to suggest that the workaround is insufficient.
9986          */
9987         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9988         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9989         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9990
9991         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9992          * has the register indirect write enable bit set before
9993          * we try to access any of the MMIO registers.  It is also
9994          * critical that the PCI-X hw workaround situation is decided
9995          * before that as well.
9996          */
9997         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9998                               &misc_ctrl_reg);
9999
10000         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10001                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10002
10003         /* Wrong chip ID in 5752 A0. This code can be removed later
10004          * as A0 is not in production.
10005          */
10006         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10007                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10008
10009         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10010          * we need to disable memory and use config. cycles
10011          * only to access all registers. The 5702/03 chips
10012          * can mistakenly decode the special cycles from the
10013          * ICH chipsets as memory write cycles, causing corruption
10014          * of register and memory space. Only certain ICH bridges
10015          * will drive special cycles with non-zero data during the
10016          * address phase which can fall within the 5703's address
10017          * range. This is not an ICH bug as the PCI spec allows
10018          * non-zero address during special cycles. However, only
10019          * these ICH bridges are known to drive non-zero addresses
10020          * during special cycles.
10021          *
10022          * Since special cycles do not cross PCI bridges, we only
10023          * enable this workaround if the 5703 is on the secondary
10024          * bus of these ICH bridges.
10025          */
10026         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10027             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10028                 static struct tg3_dev_id {
10029                         u32     vendor;
10030                         u32     device;
10031                         u32     rev;
10032                 } ich_chipsets[] = {
10033                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10034                           PCI_ANY_ID },
10035                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10036                           PCI_ANY_ID },
10037                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10038                           0xa },
10039                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10040                           PCI_ANY_ID },
10041                         { },
10042                 };
10043                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10044                 struct pci_dev *bridge = NULL;
10045
10046                 while (pci_id->vendor != 0) {
10047                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10048                                                 bridge);
10049                         if (!bridge) {
10050                                 pci_id++;
10051                                 continue;
10052                         }
10053                         if (pci_id->rev != PCI_ANY_ID) {
10054                                 u8 rev;
10055
10056                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10057                                                      &rev);
10058                                 if (rev > pci_id->rev)
10059                                         continue;
10060                         }
10061                         if (bridge->subordinate &&
10062                             (bridge->subordinate->number ==
10063                              tp->pdev->bus->number)) {
10064
10065                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10066                                 pci_dev_put(bridge);
10067                                 break;
10068                         }
10069                 }
10070         }
10071
10072         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10073          * DMA addresses > 40-bit. This bridge may have other additional
10074          * DMA addresses > 40-bit. This bridge may have additional
10075          * Any tg3 device found behind the bridge will also need the 40-bit
10076          * DMA workaround.
10077          */
10078         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10079             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10080                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10081                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10082                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10083         }
10084         else {
10085                 struct pci_dev *bridge = NULL;
10086
10087                 do {
10088                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10089                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10090                                                 bridge);
10091                         if (bridge && bridge->subordinate &&
10092                             (bridge->subordinate->number <=
10093                              tp->pdev->bus->number) &&
10094                             (bridge->subordinate->subordinate >=
10095                              tp->pdev->bus->number)) {
10096                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10097                                 pci_dev_put(bridge);
10098                                 break;
10099                         }
10100                 } while (bridge);
10101         }
10102
10103         /* Initialize misc host control in PCI block. */
10104         tp->misc_host_ctrl |= (misc_ctrl_reg &
10105                                MISC_HOST_CTRL_CHIPREV);
10106         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10107                                tp->misc_host_ctrl);
10108
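        /* TG3PCI_CACHELINESZ is the standard PCI config dword at offset
         * 0x0c: bits 7:0 cache line size, 15:8 latency timer, 23:16
         * header type, 31:24 BIST.
         */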
10109         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10110                               &cacheline_sz_reg);
10111
10112         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10113         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10114         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10115         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10116
10117         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10118             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10119             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10120             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10121             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10122                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10123
10124         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10125             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10126                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10127
10128         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10129                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10130                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10131                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10132                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10133                 } else
10134                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10135         }
10136
10137         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10138             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10139             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10140             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10141             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10142                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10143
10144         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10145                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10146
10147         /* If we have an AMD 762 or VIA K8T800 chipset, write
10148          * reordering to the mailbox registers done by the host
10149          * controller can cause major troubles.  We read back from
10150          * every mailbox register write to force the writes to be
10151          * posted to the chip in order.
10152          */
10153         if (pci_dev_present(write_reorder_chipsets) &&
10154             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10155                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10156
10157         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10158             tp->pci_lat_timer < 64) {
10159                 tp->pci_lat_timer = 64;
10160
10161                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10162                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10163                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10164                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10165
10166                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10167                                        cacheline_sz_reg);
10168         }
10169
10170         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10171                               &pci_state_reg);
10172
10173         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10174                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10175
10176                 /* If this is a 5700 BX chipset, and we are in PCI-X
10177                  * mode, enable register write workaround.
10178                  *
10179                  * The workaround is to use indirect register accesses
10180                  * for all chip writes not to mailbox registers.
10181                  */
10182                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10183                         u32 pm_reg;
10184                         u16 pci_cmd;
10185
10186                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10187
10188                         /* The chip can have its power management PCI config
10189                          * space registers clobbered due to this bug.
10190                          * So explicitly force the chip into D0 here.
10191                          */
10192                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10193                                               &pm_reg);
10194                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10195                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10196                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10197                                                pm_reg);
10198
10199                         /* Also, force SERR#/PERR# in PCI command. */
10200                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10201                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10202                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10203                 }
10204         }
10205
10206         /* 5700 BX chips need to have their TX producer index mailboxes
10207          * written twice to workaround a bug.
10208          */
10209         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10210                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10211
10212         /* Back to back register writes can cause problems on this chip,
10213          * the workaround is to read back all reg writes except those to
10214          * mailbox regs.  See tg3_write_indirect_reg32().
10215          *
10216          * PCI Express 5750_A0 rev chips need this workaround too.
10217          */
10218         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10219             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10220              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10221                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10222
10223         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10224                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10225         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10226                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10227
10228         /* Chip-specific fixup from Broadcom driver */
10229         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10230             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10231                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10232                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10233         }
10234
10235         /* Default fast path register access methods */
10236         tp->read32 = tg3_read32;
10237         tp->write32 = tg3_write32;
10238         tp->read32_mbox = tg3_read32;
10239         tp->write32_mbox = tg3_write32;
10240         tp->write32_tx_mbox = tg3_write32;
10241         tp->write32_rx_mbox = tg3_write32;
10242
10243         /* Various workaround register access methods */
10244         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10245                 tp->write32 = tg3_write_indirect_reg32;
10246         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10247                 tp->write32 = tg3_write_flush_reg32;
10248
10249         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10250             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10251                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10252                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10253                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10254         }
10255
10256         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10257                 tp->read32 = tg3_read_indirect_reg32;
10258                 tp->write32 = tg3_write_indirect_reg32;
10259                 tp->read32_mbox = tg3_read_indirect_mbox;
10260                 tp->write32_mbox = tg3_write_indirect_mbox;
10261                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10262                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10263
10264                 iounmap(tp->regs);
10265                 tp->regs = NULL;
10266
10267                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10268                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10269                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10270         }
10271
10272         /* Get eeprom hw config before calling tg3_set_power_state().
10273          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10274          * determined before calling tg3_set_power_state() so that
10275          * we know whether or not to switch out of Vaux power.
10276          * When the flag is set, it means that GPIO1 is used for eeprom
10277          * write protect and also implies that it is a LOM where GPIOs
10278          * are not used to switch power.
10279          */ 
10280         tg3_get_eeprom_hw_cfg(tp);
10281
10282         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10283          * GPIO1 driven high will bring 5700's external PHY out of reset.
10284          * It is also used as eeprom write protect on LOMs.
10285          */
10286         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10287         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10288             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10289                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10290                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10291         /* Unused GPIO3 must be driven as output on 5752 because there
10292          * are no pull-up resistors on unused GPIO pins.
10293          */
10294         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10295                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10296
10297         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10298                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10299
10300         /* Force the chip into D0. */
10301         err = tg3_set_power_state(tp, PCI_D0);
10302         if (err) {
10303                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10304                        pci_name(tp->pdev));
10305                 return err;
10306         }
10307
10308         /* 5700 B0 chips do not support checksumming correctly due
10309          * to hardware bugs.
10310          */
10311         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10312                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10313
10314         /* Pseudo-header checksum is done by hardware logic and not
10315          * the offload processors, so make the chip do the pseudo-
10316          * header checksums on receive.  For transmit it is more
10317          * convenient to do the pseudo-header checksum in software
10318          * as Linux does that on transmit for us in all cases.
10319          */
10320         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
10321         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
10322
10323         /* Derive initial jumbo mode from MTU assigned in
10324          * ether_setup() via the alloc_etherdev() call
10325          */
10326         if (tp->dev->mtu > ETH_DATA_LEN &&
10327             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10328                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10329
10330         /* Determine WakeOnLan speed to use. */
10331         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10332             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10333             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10334             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10335                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10336         } else {
10337                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10338         }
10339
10340         /* A few boards don't want the Ethernet@WireSpeed phy feature */
10341         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10342             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10343              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10344              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10345             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10346                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10347
10348         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10349             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10350                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10351         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10352                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10353
10354         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10355             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10356             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10357                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10358
10359         tp->coalesce_mode = 0;
10360         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10361             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10362                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10363
10364         /* Initialize MAC MI mode, polling disabled. */
10365         tw32_f(MAC_MI_MODE, tp->mi_mode);
10366         udelay(80);
10367
10368         /* Initialize data/descriptor byte/word swapping. */
10369         val = tr32(GRC_MODE);
10370         val &= GRC_MODE_HOST_STACKUP;
10371         tw32(GRC_MODE, val | tp->grc_mode);
10372
10373         tg3_switch_clocks(tp);
10374
10375         /* Clear this out for sanity. */
10376         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10377
10378         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10379                               &pci_state_reg);
10380         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10381             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10382                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10383
10384                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10385                     chiprevid == CHIPREV_ID_5701_B0 ||
10386                     chiprevid == CHIPREV_ID_5701_B2 ||
10387                     chiprevid == CHIPREV_ID_5701_B5) {
10388                         void __iomem *sram_base;
10389
10390                         /* Write some dummy words into the SRAM status block
10391                          * area and see if they read back correctly.  If the
10392                          * readback is bad, force-enable the PCI-X workaround.
10393                          */
10394                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10395
10396                         writel(0x00000000, sram_base);
10397                         writel(0x00000000, sram_base + 4);
10398                         writel(0xffffffff, sram_base + 4);
10399                         if (readl(sram_base) != 0x00000000)
10400                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10401                 }
10402         }
10403
10404         udelay(50);
10405         tg3_nvram_init(tp);
10406
10407         grc_misc_cfg = tr32(GRC_MISC_CFG);
10408         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10409
10410         /* Broadcom's driver says that CIOBE multisplit has a bug */
10411 #if 0
10412         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10413             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10414                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10415                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10416         }
10417 #endif
10418         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10419             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10420              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10421                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10422
10423         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10424             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10425                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10426         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10427                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10428                                       HOSTCC_MODE_CLRTICK_TXBD);
10429
10430                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10431                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10432                                        tp->misc_host_ctrl);
10433         }
10434
10435         /* These boards are limited to 10/100 speeds. */
10436         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10437              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10438             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10439              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10440              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10441               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10442               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10443             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10444              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10445               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10446                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10447
10448         err = tg3_phy_probe(tp);
10449         if (err) {
10450                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10451                        pci_name(tp->pdev), err);
10452                 /* ... but do not return immediately ... */
10453         }
10454
10455         tg3_read_partno(tp);
10456         tg3_read_fw_ver(tp);
10457
10458         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10459                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10460         } else {
10461                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10462                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10463                 else
10464                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10465         }
10466
10467         /* 5700 {AX,BX} chips have a broken status block link
10468          * change bit implementation, so we must use the
10469          * status register in those cases.
10470          */
10471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10472                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10473         else
10474                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10475
10476         /* The led_ctrl is set during tg3_phy_probe; here we might
10477          * have to force the link status polling mechanism based
10478          * upon subsystem IDs.
10479          */
10480         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10481             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10482                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10483                                   TG3_FLAG_USE_LINKCHG_REG);
10484         }
10485
10486         /* For all SERDES we poll the MAC status register. */
10487         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10488                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10489         else
10490                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10491
10492         /* All chips except the 5755 and 5787 can get confused if TX
10493          * buffers straddle the 4GB address boundary in some cases.
10494          */
10495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10497                 tp->dev->hard_start_xmit = tg3_start_xmit;
10498         else
10499                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10500
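        /* Pad RX buffers by 2 bytes so the IP header lands on a 4-byte
         * boundary.  The 5701 in PCI-X mode is the exception; it appears
         * unable to DMA to such an offset, so it gets no padding.
         */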
10501         tp->rx_offset = 2;
10502         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10503             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10504                 tp->rx_offset = 0;
10505
10506         /* By default, disable wake-on-lan.  User can change this
10507          * using ETHTOOL_SWOL.
10508          */
10509         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10510
10511         return err;
10512 }
10513
10514 #ifdef CONFIG_SPARC64
10515 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10516 {
10517         struct net_device *dev = tp->dev;
10518         struct pci_dev *pdev = tp->pdev;
10519         struct pcidev_cookie *pcp = pdev->sysdata;
10520
10521         if (pcp != NULL) {
10522                 int node = pcp->prom_node;
10523
10524                 if (prom_getproplen(node, "local-mac-address") == 6) {
10525                         prom_getproperty(node, "local-mac-address",
10526                                          dev->dev_addr, 6);
10527                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10528                         return 0;
10529                 }
10530         }
10531         return -ENODEV;
10532 }
10533
10534 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10535 {
10536         struct net_device *dev = tp->dev;
10537
10538         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10539         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10540         return 0;
10541 }
10542 #endif
10543
10544 static int __devinit tg3_get_device_address(struct tg3 *tp)
10545 {
10546         struct net_device *dev = tp->dev;
10547         u32 hi, lo, mac_offset;
10548         int addr_ok = 0;
10549
10550 #ifdef CONFIG_SPARC64
10551         if (!tg3_get_macaddr_sparc(tp))
10552                 return 0;
10553 #endif
10554
10555         mac_offset = 0x7c;
10556         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10557              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
10558             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10559                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10560                         mac_offset = 0xcc;
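                /* If the NVRAM lock cannot be taken, kick the NVRAM state
                 * machine with a reset; otherwise simply drop the lock again.
                 */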
10561                 if (tg3_nvram_lock(tp))
10562                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10563                 else
10564                         tg3_nvram_unlock(tp);
10565         }
10566
10567         /* First try to get it from MAC address mailbox. */
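        /* A high word of 0x484b (ASCII 'H','K') marks the mailbox contents
         * as a valid MAC address left there by the bootcode.
         */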
10568         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10569         if ((hi >> 16) == 0x484b) {
10570                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10571                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10572
10573                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10574                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10575                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10576                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10577                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10578
10579                 /* Some old bootcode may report a 0 MAC address in SRAM */
10580                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10581         }
10582         if (!addr_ok) {
10583                 /* Next, try NVRAM. */
10584                 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
10585                     !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10586                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
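                        /* NVRAM stores the address in a different byte
                         * order than the mailbox, hence the reversed shifts.
                         */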
10587                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10588                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10589                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10590                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10591                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10592                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10593                 }
10594                 /* Finally just fetch it out of the MAC control regs. */
10595                 else {
10596                         hi = tr32(MAC_ADDR_0_HIGH);
10597                         lo = tr32(MAC_ADDR_0_LOW);
10598
10599                         dev->dev_addr[5] = lo & 0xff;
10600                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10601                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10602                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10603                         dev->dev_addr[1] = hi & 0xff;
10604                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10605                 }
10606         }
10607
10608         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10609 #ifdef CONFIG_SPARC64
10610                 if (!tg3_get_default_macaddr_sparc(tp))
10611                         return 0;
10612 #endif
10613                 return -EINVAL;
10614         }
10615         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10616         return 0;
10617 }
10618
10619 #define BOUNDARY_SINGLE_CACHELINE       1
10620 #define BOUNDARY_MULTI_CACHELINE        2
10621
10622 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10623 {
10624         int cacheline_size;
10625         u8 byte;
10626         int goal;
10627
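        /* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of 0
         * means it was never programmed, so fall back to 1024 bytes, the
         * largest boundary handled below.
         */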
10628         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10629         if (byte == 0)
10630                 cacheline_size = 1024;
10631         else
10632                 cacheline_size = (int) byte * 4;
10633
10634         /* On 5703 and later chips, the boundary bits have no
10635          * effect.
10636          */
10637         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10638             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10639             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10640                 goto out;
10641
10642 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10643         goal = BOUNDARY_MULTI_CACHELINE;
10644 #else
10645 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10646         goal = BOUNDARY_SINGLE_CACHELINE;
10647 #else
10648         goal = 0;
10649 #endif
10650 #endif
10651
10652         if (!goal)
10653                 goto out;
10654
10655         /* PCI controllers on most RISC systems tend to disconnect
10656          * when a device tries to burst across a cache-line boundary.
10657          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10658          *
10659          * Unfortunately, for PCI-E there are only limited
10660          * write-side controls for this, and thus for reads
10661          * we will still get the disconnects.  We also waste these
10662          * PCI cycles for both reads and writes on chips other than
10663          * the 5700 and 5701, which do not implement the boundary
10664          * bits.
10665          */
10666         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10667             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10668                 switch (cacheline_size) {
10669                 case 16:
10670                 case 32:
10671                 case 64:
10672                 case 128:
10673                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10674                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10675                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10676                         } else {
10677                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10678                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10679                         }
10680                         break;
10681
10682                 case 256:
10683                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10684                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10685                         break;
10686
10687                 default:
10688                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10689                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10690                         break;
10691                 }
10692         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10693                 switch (cacheline_size) {
10694                 case 16:
10695                 case 32:
10696                 case 64:
10697                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10698                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10699                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10700                                 break;
10701                         }
10702                         /* fallthrough */
10703                 case 128:
10704                 default:
10705                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10706                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10707                         break;
10708                 }
10709         } else {
10710                 switch (cacheline_size) {
10711                 case 16:
10712                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10713                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10714                                         DMA_RWCTRL_WRITE_BNDRY_16);
10715                                 break;
10716                         }
10717                         /* fallthrough */
10718                 case 32:
10719                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10720                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10721                                         DMA_RWCTRL_WRITE_BNDRY_32);
10722                                 break;
10723                         }
10724                         /* fallthrough */
10725                 case 64:
10726                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10727                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10728                                         DMA_RWCTRL_WRITE_BNDRY_64);
10729                                 break;
10730                         }
10731                         /* fallthrough */
10732                 case 128:
10733                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10734                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10735                                         DMA_RWCTRL_WRITE_BNDRY_128);
10736                                 break;
10737                         }
10738                         /* fallthrough */
10739                 case 256:
10740                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10741                                 DMA_RWCTRL_WRITE_BNDRY_256);
10742                         break;
10743                 case 512:
10744                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10745                                 DMA_RWCTRL_WRITE_BNDRY_512);
10746                         break;
10747                 case 1024:
10748                 default:
10749                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10750                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10751                         break;
10752                 }
10753         }
10754
10755 out:
10756         return val;
10757 }
10758
10759 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10760 {
10761         struct tg3_internal_buffer_desc test_desc;
10762         u32 sram_dma_descs;
10763         int i, ret;
10764
10765         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10766
10767         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10768         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10769         tw32(RDMAC_STATUS, 0);
10770         tw32(WDMAC_STATUS, 0);
10771
10772         tw32(BUFMGR_MODE, 0);
10773         tw32(FTQ_RESET, 0);
10774
10775         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10776         test_desc.addr_lo = buf_dma & 0xffffffff;
10777         test_desc.nic_mbuf = 0x00002100;
10778         test_desc.len = size;
10779
10780         /*
10781          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10782          * the *second* time the tg3 driver was getting loaded after an
10783          * initial scan.
10784          *
10785          * Broadcom tells me:
10786          *   ...the DMA engine is connected to the GRC block and a DMA
10787          *   reset may affect the GRC block in some unpredictable way...
10788          *   The behavior of resets to individual blocks has not been tested.
10789          *
10790          * Broadcom noted the GRC reset will also reset all sub-components.
10791          */
10792         if (to_device) {
10793                 test_desc.cqid_sqid = (13 << 8) | 2;
10794
10795                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10796                 udelay(40);
10797         } else {
10798                 test_desc.cqid_sqid = (16 << 8) | 7;
10799
10800                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10801                 udelay(40);
10802         }
10803         test_desc.flags = 0x00000005;
10804
10805         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10806                 u32 val;
10807
10808                 val = *(((u32 *)&test_desc) + i);
10809                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10810                                        sram_dma_descs + (i * sizeof(u32)));
10811                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10812         }
10813         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10814
10815         if (to_device) {
10816                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10817         } else {
10818                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10819         }
10820
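        /* Poll the completion FIFO for up to 4ms (40 x 100us) waiting for
         * the test descriptor to complete.
         */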
10821         ret = -ENODEV;
10822         for (i = 0; i < 40; i++) {
10823                 u32 val;
10824
10825                 if (to_device)
10826                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10827                 else
10828                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10829                 if ((val & 0xffff) == sram_dma_descs) {
10830                         ret = 0;
10831                         break;
10832                 }
10833
10834                 udelay(100);
10835         }
10836
10837         return ret;
10838 }
10839
10840 #define TEST_BUFFER_SIZE        0x2000
10841
10842 static int __devinit tg3_test_dma(struct tg3 *tp)
10843 {
10844         dma_addr_t buf_dma;
10845         u32 *buf, saved_dma_rwctrl;
10846         int ret;
10847
10848         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10849         if (!buf) {
10850                 ret = -ENOMEM;
10851                 goto out_nofree;
10852         }
10853
10854         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10855                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10856
10857         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10858
10859         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10860                 /* DMA read watermark not used on PCIE */
10861                 tp->dma_rwctrl |= 0x00180000;
10862         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10863                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10864                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10865                         tp->dma_rwctrl |= 0x003f0000;
10866                 else
10867                         tp->dma_rwctrl |= 0x003f000f;
10868         } else {
10869                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10870                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10871                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10872
10873                         /* If the 5704 is behind the EPB bridge, we can
10874                          * do the less restrictive ONE_DMA workaround for
10875                          * better performance.
10876                          */
10877                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10878                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10879                                 tp->dma_rwctrl |= 0x8000;
10880                         else if (ccval == 0x6 || ccval == 0x7)
10881                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10882
10883                         /* Set bit 23 to enable PCIX hw bug fix */
10884                         tp->dma_rwctrl |= 0x009f0000;
10885                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10886                         /* 5780 always in PCIX mode */
10887                         tp->dma_rwctrl |= 0x00144000;
10888                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10889                         /* 5714 always in PCIX mode */
10890                         tp->dma_rwctrl |= 0x00148000;
10891                 } else {
10892                         tp->dma_rwctrl |= 0x001b000f;
10893                 }
10894         }
10895
10896         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10897             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10898                 tp->dma_rwctrl &= 0xfffffff0;
10899
10900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10901             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10902                 /* Remove this if it causes problems for some boards. */
10903                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10904
10905                 /* On 5700/5701 chips, we need to set this bit.
10906                  * Otherwise the chip will issue cacheline transactions
10907                  * to streamable DMA memory with not all the byte
10908                  * enables turned on.  This is an error on several
10909                  * RISC PCI controllers, in particular sparc64.
10910                  *
10911                  * On 5703/5704 chips, this bit has been reassigned
10912                  * a different meaning.  In particular, it is used
10913                  * on those chips to enable a PCI-X workaround.
10914                  */
10915                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10916         }
10917
10918         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10919
10920 #if 0
10921         /* Unneeded, already done by tg3_get_invariants.  */
10922         tg3_switch_clocks(tp);
10923 #endif
10924
10925         ret = 0;
10926         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10927             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10928                 goto out;
10929
10930         /* It is best to perform the DMA test with the maximum write
10931          * burst size to expose the 5700/5701 write DMA bug.
10932          */
10933         saved_dma_rwctrl = tp->dma_rwctrl;
10934         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10935         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10936
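        /* DMA a known pattern to the chip and read it back.  If the data
         * comes back corrupted, retry once with a 16-byte write boundary
         * before giving up.
         */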
10937         while (1) {
10938                 u32 *p = buf, i;
10939
10940                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10941                         p[i] = i;
10942
10943                 /* Send the buffer to the chip. */
10944                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10945                 if (ret) {
10946                         printk(KERN_ERR "tg3_test_dma() write to device failed, err %d\n", ret);
10947                         break;
10948                 }
10949
10950 #if 0
10951                 /* validate data reached card RAM correctly. */
10952                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10953                         u32 val;
10954                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10955                         if (le32_to_cpu(val) != p[i]) {
10956                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10957                                 /* ret = -ENODEV here? */
10958                         }
10959                         p[i] = 0;
10960                 }
10961 #endif
10962                 /* Now read it back. */
10963                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10964                 if (ret) {
10965                         printk(KERN_ERR "tg3_test_dma() read from device failed, err %d\n", ret);
10966
10967                         break;
10968                 }
10969
10970                 /* Verify it. */
10971                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10972                         if (p[i] == i)
10973                                 continue;
10974
10975                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10976                             DMA_RWCTRL_WRITE_BNDRY_16) {
10977                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10978                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10979                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10980                                 break;
10981                         } else {
10982                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10983                                 ret = -ENODEV;
10984                                 goto out;
10985                         }
10986                 }
10987
10988                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10989                         /* Success. */
10990                         ret = 0;
10991                         break;
10992                 }
10993         }
10994         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10995             DMA_RWCTRL_WRITE_BNDRY_16) {
10996                 static struct pci_device_id dma_wait_state_chipsets[] = {
10997                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10998                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10999                         { },
11000                 };
11001
11002                 /* The DMA test passed without adjusting the DMA boundary;
11003                  * now look for chipsets that are known to expose the
11004                  * DMA bug without failing the test.
11005                  */
11006                 if (pci_dev_present(dma_wait_state_chipsets)) {
11007                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11008                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11009                 }
11010                 else
11011                         /* Safe to use the calculated DMA boundary. */
11012                         tp->dma_rwctrl = saved_dma_rwctrl;
11013
11014                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11015         }
11016
11017 out:
11018         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11019 out_nofree:
11020         return ret;
11021 }
11022
11023 static void __devinit tg3_init_link_config(struct tg3 *tp)
11024 {
11025         tp->link_config.advertising =
11026                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11027                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11028                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11029                  ADVERTISED_Autoneg | ADVERTISED_MII);
11030         tp->link_config.speed = SPEED_INVALID;
11031         tp->link_config.duplex = DUPLEX_INVALID;
11032         tp->link_config.autoneg = AUTONEG_ENABLE;
11033         tp->link_config.active_speed = SPEED_INVALID;
11034         tp->link_config.active_duplex = DUPLEX_INVALID;
11035         tp->link_config.phy_is_low_power = 0;
11036         tp->link_config.orig_speed = SPEED_INVALID;
11037         tp->link_config.orig_duplex = DUPLEX_INVALID;
11038         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11039 }
11040
11041 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11042 {
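        /* 5705-class chips have a smaller on-chip buffer pool, so they
         * use their own set of buffer manager watermarks.
         */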
11043         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11044                 tp->bufmgr_config.mbuf_read_dma_low_water =
11045                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11046                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11047                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11048                 tp->bufmgr_config.mbuf_high_water =
11049                         DEFAULT_MB_HIGH_WATER_5705;
11050
11051                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11052                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11053                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11054                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11055                 tp->bufmgr_config.mbuf_high_water_jumbo =
11056                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11057         } else {
11058                 tp->bufmgr_config.mbuf_read_dma_low_water =
11059                         DEFAULT_MB_RDMA_LOW_WATER;
11060                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11061                         DEFAULT_MB_MACRX_LOW_WATER;
11062                 tp->bufmgr_config.mbuf_high_water =
11063                         DEFAULT_MB_HIGH_WATER;
11064
11065                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11066                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11067                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11068                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11069                 tp->bufmgr_config.mbuf_high_water_jumbo =
11070                         DEFAULT_MB_HIGH_WATER_JUMBO;
11071         }
11072
11073         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11074         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11075 }
11076
11077 static char * __devinit tg3_phy_string(struct tg3 *tp)
11078 {
11079         switch (tp->phy_id & PHY_ID_MASK) {
11080         case PHY_ID_BCM5400:    return "5400";
11081         case PHY_ID_BCM5401:    return "5401";
11082         case PHY_ID_BCM5411:    return "5411";
11083         case PHY_ID_BCM5701:    return "5701";
11084         case PHY_ID_BCM5703:    return "5703";
11085         case PHY_ID_BCM5704:    return "5704";
11086         case PHY_ID_BCM5705:    return "5705";
11087         case PHY_ID_BCM5750:    return "5750";
11088         case PHY_ID_BCM5752:    return "5752";
11089         case PHY_ID_BCM5714:    return "5714";
11090         case PHY_ID_BCM5780:    return "5780";
11091         case PHY_ID_BCM5755:    return "5755";
11092         case PHY_ID_BCM5787:    return "5787";
11093         case PHY_ID_BCM8002:    return "8002/serdes";
11094         case 0:                 return "serdes";
11095         default:                return "unknown";
11096         }
11097 }
11098
11099 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11100 {
11101         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11102                 strcpy(str, "PCI Express");
11103                 return str;
11104         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11105                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11106
11107                 strcpy(str, "PCIX:");
11108
11109                 if ((clock_ctrl == 7) ||
11110                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11111                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11112                         strcat(str, "133MHz");
11113                 else if (clock_ctrl == 0)
11114                         strcat(str, "33MHz");
11115                 else if (clock_ctrl == 2)
11116                         strcat(str, "50MHz");
11117                 else if (clock_ctrl == 4)
11118                         strcat(str, "66MHz");
11119                 else if (clock_ctrl == 6)
11120                         strcat(str, "100MHz");
11121         } else {
11122                 strcpy(str, "PCI:");
11123                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11124                         strcat(str, "66MHz");
11125                 else
11126                         strcat(str, "33MHz");
11127         }
11128         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11129                 strcat(str, ":32-bit");
11130         else
11131                 strcat(str, ":64-bit");
11132         return str;
11133 }
11134
11135 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11136 {
11137         struct pci_dev *peer;
11138         unsigned int func, devnr = tp->pdev->devfn & ~7;
11139
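        /* devfn & ~7 masks off the function number; scan the other
         * functions in this slot for the second port of a dual-port NIC.
         */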
11140         for (func = 0; func < 8; func++) {
11141                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11142                 if (peer && peer != tp->pdev)
11143                         break;
11144                 pci_dev_put(peer);
11145         }
11146         /* The 5704 can be configured in single-port mode; set peer to
11147          * tp->pdev in that case.
11148          */
11149         if (!peer) {
11150                 peer = tp->pdev;
11151                 return peer;
11152         }
11153
11154         /*
11155          * We don't need to keep the refcount elevated; there's no way
11156          * to remove one half of this device without removing the other.
11157          */
11158         pci_dev_put(peer);
11159
11160         return peer;
11161 }
11162
11163 static void __devinit tg3_init_coal(struct tg3 *tp)
11164 {
11165         struct ethtool_coalesce *ec = &tp->coal;
11166
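        /* Default interrupt coalescing parameters; these are reported to
         * and adjustable through ethtool's coalesce interface.
         */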
11167         memset(ec, 0, sizeof(*ec));
11168         ec->cmd = ETHTOOL_GCOALESCE;
11169         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11170         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11171         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11172         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11173         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11174         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11175         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11176         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11177         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11178
11179         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11180                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11181                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11182                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11183                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11184                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11185         }
11186
11187         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11188                 ec->rx_coalesce_usecs_irq = 0;
11189                 ec->tx_coalesce_usecs_irq = 0;
11190                 ec->stats_block_coalesce_usecs = 0;
11191         }
11192 }
11193
11194 static int __devinit tg3_init_one(struct pci_dev *pdev,
11195                                   const struct pci_device_id *ent)
11196 {
11197         static int tg3_version_printed;
11198         unsigned long tg3reg_base, tg3reg_len;
11199         struct net_device *dev;
11200         struct tg3 *tp;
11201         int i, err, pm_cap;
11202         char str[40];
11203         u64 dma_mask, persist_dma_mask;
11204
11205         if (tg3_version_printed++ == 0)
11206                 printk(KERN_INFO "%s", version);
11207
11208         err = pci_enable_device(pdev);
11209         if (err) {
11210                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11211                        "aborting.\n");
11212                 return err;
11213         }
11214
11215         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11216                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11217                        "base address, aborting.\n");
11218                 err = -ENODEV;
11219                 goto err_out_disable_pdev;
11220         }
11221
11222         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11223         if (err) {
11224                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11225                        "aborting.\n");
11226                 goto err_out_disable_pdev;
11227         }
11228
11229         pci_set_master(pdev);
11230
11231         /* Find power-management capability. */
11232         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11233         if (pm_cap == 0) {
11234                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11235                        "aborting.\n");
11236                 err = -EIO;
11237                 goto err_out_free_res;
11238         }
11239
11240         tg3reg_base = pci_resource_start(pdev, 0);
11241         tg3reg_len = pci_resource_len(pdev, 0);
11242
11243         dev = alloc_etherdev(sizeof(*tp));
11244         if (!dev) {
11245                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11246                 err = -ENOMEM;
11247                 goto err_out_free_res;
11248         }
11249
11250         SET_MODULE_OWNER(dev);
11251         SET_NETDEV_DEV(dev, &pdev->dev);
11252
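        /* NETIF_F_LLTX: the driver does its own TX locking (tp->tx_lock)
         * rather than relying on the core's xmit lock.
         */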
11253         dev->features |= NETIF_F_LLTX;
11254 #if TG3_VLAN_TAG_USED
11255         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11256         dev->vlan_rx_register = tg3_vlan_rx_register;
11257         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11258 #endif
11259
11260         tp = netdev_priv(dev);
11261         tp->pdev = pdev;
11262         tp->dev = dev;
11263         tp->pm_cap = pm_cap;
11264         tp->mac_mode = TG3_DEF_MAC_MODE;
11265         tp->rx_mode = TG3_DEF_RX_MODE;
11266         tp->tx_mode = TG3_DEF_TX_MODE;
11267         tp->mi_mode = MAC_MI_MODE_BASE;
11268         if (tg3_debug > 0)
11269                 tp->msg_enable = tg3_debug;
11270         else
11271                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11272
11273         /* The word/byte swap controls here control register access byte
11274          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11275          * setting below.
11276          */
11277         tp->misc_host_ctrl =
11278                 MISC_HOST_CTRL_MASK_PCI_INT |
11279                 MISC_HOST_CTRL_WORD_SWAP |
11280                 MISC_HOST_CTRL_INDIR_ACCESS |
11281                 MISC_HOST_CTRL_PCISTATE_RW;
11282
11283         /* The NONFRM (non-frame) byte/word swap controls take effect
11284          * on descriptor entries, anything which isn't packet data.
11285          *
11286          * The StrongARM chips on the board (one for tx, one for rx)
11287          * are running in big-endian mode.
11288          */
11289         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11290                         GRC_MODE_WSWAP_NONFRM_DATA);
11291 #ifdef __BIG_ENDIAN
11292         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11293 #endif
11294         spin_lock_init(&tp->lock);
11295         spin_lock_init(&tp->tx_lock);
11296         spin_lock_init(&tp->indirect_lock);
11297         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11298
11299         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11300         if (!tp->regs) {
11301                 printk(KERN_ERR PFX "Cannot map device registers, "
11302                        "aborting.\n");
11303                 err = -ENOMEM;
11304                 goto err_out_free_dev;
11305         }
11306
11307         tg3_init_link_config(tp);
11308
11309         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11310         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11311         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11312
11313         dev->open = tg3_open;
11314         dev->stop = tg3_close;
11315         dev->get_stats = tg3_get_stats;
11316         dev->set_multicast_list = tg3_set_rx_mode;
11317         dev->set_mac_address = tg3_set_mac_addr;
11318         dev->do_ioctl = tg3_ioctl;
11319         dev->tx_timeout = tg3_tx_timeout;
11320         dev->poll = tg3_poll;
11321         dev->ethtool_ops = &tg3_ethtool_ops;
11322         dev->weight = 64;
11323         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11324         dev->change_mtu = tg3_change_mtu;
11325         dev->irq = pdev->irq;
11326 #ifdef CONFIG_NET_POLL_CONTROLLER
11327         dev->poll_controller = tg3_poll_controller;
11328 #endif
11329
11330         err = tg3_get_invariants(tp);
11331         if (err) {
11332                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11333                        "aborting.\n");
11334                 goto err_out_iounmap;
11335         }
11336
11337         /* The EPB bridge inside the 5714, 5715, and 5780, and any
11338          * device behind the EPB, cannot support DMA addresses > 40-bit.
11339          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11340          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11341          * do DMA address check in tg3_start_xmit().
11342          */
11343         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11344                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11345         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11346                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11347 #ifdef CONFIG_HIGHMEM
11348                 dma_mask = DMA_64BIT_MASK;
11349 #endif
11350         } else
11351                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11352
11353         /* Configure DMA attributes. */
11354         if (dma_mask > DMA_32BIT_MASK) {
11355                 err = pci_set_dma_mask(pdev, dma_mask);
11356                 if (!err) {
11357                         dev->features |= NETIF_F_HIGHDMA;
11358                         err = pci_set_consistent_dma_mask(pdev,
11359                                                           persist_dma_mask);
11360                         if (err < 0) {
11361                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11362                                        "DMA for consistent allocations\n");
11363                                 goto err_out_iounmap;
11364                         }
11365                 }
11366         }
11367         if (err || dma_mask == DMA_32BIT_MASK) {
11368                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11369                 if (err) {
11370                         printk(KERN_ERR PFX "No usable DMA configuration, "
11371                                "aborting.\n");
11372                         goto err_out_iounmap;
11373                 }
11374         }
11375
11376         tg3_init_bufmgr_config(tp);
11377
11378 #if TG3_TSO_SUPPORT != 0
11379         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11380                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11381         }
11382         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11383             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11384             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11385             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11386                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11387         } else {
11388                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11389         }
11390
11391         /* TSO is on by default on chips that support hardware TSO.
11392          * Firmware TSO on older chips gives lower performance, so it
11393          * is off by default, but can be enabled using ethtool.
11394          */
11395         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11396                 dev->features |= NETIF_F_TSO;
11397
11398 #endif
11399
11400         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11401             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11402             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11403                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11404                 tp->rx_pending = 63;
11405         }
11406
11407         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11408             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11409                 tp->pdev_peer = tg3_find_peer(tp);
11410
11411         err = tg3_get_device_address(tp);
11412         if (err) {
11413                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11414                        "aborting.\n");
11415                 goto err_out_iounmap;
11416         }
11417
11418         /*
11419          * Reset the chip in case the UNDI or EFI driver did not shut
11420          * it down cleanly.  The DMA self test below will enable WDMAC,
11421          * and we would see (spurious) pending DMA on the PCI bus at that point.
11422          */
11423         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11424             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11425                 pci_save_state(tp->pdev);
11426                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11427                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11428         }
11429
11430         err = tg3_test_dma(tp);
11431         if (err) {
11432                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11433                 goto err_out_iounmap;
11434         }
11435
11436         /* Tigon3 can do IPv4 checksumming only... and some chips have buggy
11437          * checksumming.
11438          */
11439         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11440                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11441                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11442                         dev->features |= NETIF_F_HW_CSUM;
11443                 else
11444                         dev->features |= NETIF_F_IP_CSUM;
11445                 dev->features |= NETIF_F_SG;
11446                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11447         } else
11448                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11449
11450         /* flow control autonegotiation is default behavior */
11451         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11452
11453         tg3_init_coal(tp);
11454
11455         /* Now that we have fully setup the chip, save away a snapshot
11456          * of the PCI config space.  We need to restore this after
11457          * GRC_MISC_CFG core clock resets and some resume events.
11458          */
11459         pci_save_state(tp->pdev);
11460
11461         err = register_netdev(dev);
11462         if (err) {
11463                 printk(KERN_ERR PFX "Cannot register net device, "
11464                        "aborting.\n");
11465                 goto err_out_iounmap;
11466         }
11467
11468         pci_set_drvdata(pdev, dev);
11469
11470         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11471                dev->name,
11472                tp->board_part_number,
11473                tp->pci_chip_rev_id,
11474                tg3_phy_string(tp),
11475                tg3_bus_string(tp, str),
11476                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11477
11478         for (i = 0; i < 6; i++)
11479                 printk("%2.2x%c", dev->dev_addr[i],
11480                        i == 5 ? '\n' : ':');
11481
11482         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11483                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11484                "TSOcap[%d]\n",
11485                dev->name,
11486                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11487                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11488                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11489                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11490                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11491                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11492                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11493         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11494                dev->name, tp->dma_rwctrl,
11495                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11496                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11497
11498         netif_carrier_off(tp->dev);
11499
11500         return 0;
11501
11502 err_out_iounmap:
11503         if (tp->regs) {
11504                 iounmap(tp->regs);
11505                 tp->regs = NULL;
11506         }
11507
11508 err_out_free_dev:
11509         free_netdev(dev);
11510
11511 err_out_free_res:
11512         pci_release_regions(pdev);
11513
11514 err_out_disable_pdev:
11515         pci_disable_device(pdev);
11516         pci_set_drvdata(pdev, NULL);
11517         return err;
11518 }
11519
11520 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11521 {
11522         struct net_device *dev = pci_get_drvdata(pdev);
11523
11524         if (dev) {
11525                 struct tg3 *tp = netdev_priv(dev);
11526
11527                 flush_scheduled_work();
11528                 unregister_netdev(dev);
11529                 if (tp->regs) {
11530                         iounmap(tp->regs);
11531                         tp->regs = NULL;
11532                 }
11533                 free_netdev(dev);
11534                 pci_release_regions(pdev);
11535                 pci_disable_device(pdev);
11536                 pci_set_drvdata(pdev, NULL);
11537         }
11538 }
11539
11540 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11541 {
11542         struct net_device *dev = pci_get_drvdata(pdev);
11543         struct tg3 *tp = netdev_priv(dev);
11544         int err;
11545
11546         if (!netif_running(dev))
11547                 return 0;
11548
11549         flush_scheduled_work();
11550         tg3_netif_stop(tp);
11551
11552         del_timer_sync(&tp->timer);
11553
11554         tg3_full_lock(tp, 1);
11555         tg3_disable_ints(tp);
11556         tg3_full_unlock(tp);
11557
11558         netif_device_detach(dev);
11559
11560         tg3_full_lock(tp, 0);
11561         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11562         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11563         tg3_full_unlock(tp);
11564
11565         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11566         if (err) {
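                /* Power-down failed: bring the hardware back up and
                 * restart the interface so the device remains usable.
                 */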
11567                 tg3_full_lock(tp, 0);
11568
11569                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11570                 tg3_init_hw(tp);
11571
11572                 tp->timer.expires = jiffies + tp->timer_offset;
11573                 add_timer(&tp->timer);
11574
11575                 netif_device_attach(dev);
11576                 tg3_netif_start(tp);
11577
11578                 tg3_full_unlock(tp);
11579         }
11580
11581         return err;
11582 }
11583
11584 static int tg3_resume(struct pci_dev *pdev)
11585 {
11586         struct net_device *dev = pci_get_drvdata(pdev);
11587         struct tg3 *tp = netdev_priv(dev);
11588         int err;
11589
11590         if (!netif_running(dev))
11591                 return 0;
11592
11593         pci_restore_state(tp->pdev);
11594
11595         err = tg3_set_power_state(tp, PCI_D0);
11596         if (err)
11597                 return err;
11598
11599         netif_device_attach(dev);
11600
11601         tg3_full_lock(tp, 0);
11602
11603         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11604         tg3_init_hw(tp);
11605
11606         tp->timer.expires = jiffies + tp->timer_offset;
11607         add_timer(&tp->timer);
11608
11609         tg3_netif_start(tp);
11610
11611         tg3_full_unlock(tp);
11612
11613         return 0;
11614 }
11615
11616 static struct pci_driver tg3_driver = {
11617         .name           = DRV_MODULE_NAME,
11618         .id_table       = tg3_pci_tbl,
11619         .probe          = tg3_init_one,
11620         .remove         = __devexit_p(tg3_remove_one),
11621         .suspend        = tg3_suspend,
11622         .resume         = tg3_resume
11623 };
11624
11625 static int __init tg3_init(void)
11626 {
11627         return pci_module_init(&tg3_driver);
11628 }
11629
11630 static void __exit tg3_cleanup(void)
11631 {
11632         pci_unregister_driver(&tg3_driver);
11633 }
11634
11635 module_init(tg3_init);
11636 module_exit(tg3_cleanup);