]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/net/tg3.c
[TG3]: Skip phy power down on some devices
[net-next-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.52"
73 #define DRV_MODULE_RELDATE      "Mar 06, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { 0, }
261 };
262
263 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
264
265 static struct {
266         const char string[ETH_GSTRING_LEN];
267 } ethtool_stats_keys[TG3_NUM_STATS] = {
268         { "rx_octets" },
269         { "rx_fragments" },
270         { "rx_ucast_packets" },
271         { "rx_mcast_packets" },
272         { "rx_bcast_packets" },
273         { "rx_fcs_errors" },
274         { "rx_align_errors" },
275         { "rx_xon_pause_rcvd" },
276         { "rx_xoff_pause_rcvd" },
277         { "rx_mac_ctrl_rcvd" },
278         { "rx_xoff_entered" },
279         { "rx_frame_too_long_errors" },
280         { "rx_jabbers" },
281         { "rx_undersize_packets" },
282         { "rx_in_length_errors" },
283         { "rx_out_length_errors" },
284         { "rx_64_or_less_octet_packets" },
285         { "rx_65_to_127_octet_packets" },
286         { "rx_128_to_255_octet_packets" },
287         { "rx_256_to_511_octet_packets" },
288         { "rx_512_to_1023_octet_packets" },
289         { "rx_1024_to_1522_octet_packets" },
290         { "rx_1523_to_2047_octet_packets" },
291         { "rx_2048_to_4095_octet_packets" },
292         { "rx_4096_to_8191_octet_packets" },
293         { "rx_8192_to_9022_octet_packets" },
294
295         { "tx_octets" },
296         { "tx_collisions" },
297
298         { "tx_xon_sent" },
299         { "tx_xoff_sent" },
300         { "tx_flow_control" },
301         { "tx_mac_errors" },
302         { "tx_single_collisions" },
303         { "tx_mult_collisions" },
304         { "tx_deferred" },
305         { "tx_excessive_collisions" },
306         { "tx_late_collisions" },
307         { "tx_collide_2times" },
308         { "tx_collide_3times" },
309         { "tx_collide_4times" },
310         { "tx_collide_5times" },
311         { "tx_collide_6times" },
312         { "tx_collide_7times" },
313         { "tx_collide_8times" },
314         { "tx_collide_9times" },
315         { "tx_collide_10times" },
316         { "tx_collide_11times" },
317         { "tx_collide_12times" },
318         { "tx_collide_13times" },
319         { "tx_collide_14times" },
320         { "tx_collide_15times" },
321         { "tx_ucast_packets" },
322         { "tx_mcast_packets" },
323         { "tx_bcast_packets" },
324         { "tx_carrier_sense_errors" },
325         { "tx_discards" },
326         { "tx_errors" },
327
328         { "dma_writeq_full" },
329         { "dma_write_prioq_full" },
330         { "rxbds_empty" },
331         { "rx_discards" },
332         { "rx_errors" },
333         { "rx_threshold_hit" },
334
335         { "dma_readq_full" },
336         { "dma_read_prioq_full" },
337         { "tx_comp_queue_full" },
338
339         { "ring_set_send_prod_index" },
340         { "ring_status_update" },
341         { "nic_irqs" },
342         { "nic_avoided_irqs" },
343         { "nic_tx_threshold_hit" }
344 };
345
346 static struct {
347         const char string[ETH_GSTRING_LEN];
348 } ethtool_test_keys[TG3_NUM_TEST] = {
349         { "nvram test     (online) " },
350         { "link test      (online) " },
351         { "register test  (offline)" },
352         { "memory test    (offline)" },
353         { "loopback test  (offline)" },
354         { "interrupt test (offline)" },
355 };
356
357 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
358 {
359         writel(val, tp->regs + off);
360 }
361
362 static u32 tg3_read32(struct tg3 *tp, u32 off)
363 {
364         return (readl(tp->regs + off)); 
365 }
366
367 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
368 {
369         unsigned long flags;
370
371         spin_lock_irqsave(&tp->indirect_lock, flags);
372         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
373         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
374         spin_unlock_irqrestore(&tp->indirect_lock, flags);
375 }
376
377 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
378 {
379         writel(val, tp->regs + off);
380         readl(tp->regs + off);
381 }
382
383 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
384 {
385         unsigned long flags;
386         u32 val;
387
388         spin_lock_irqsave(&tp->indirect_lock, flags);
389         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
390         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
391         spin_unlock_irqrestore(&tp->indirect_lock, flags);
392         return val;
393 }
394
395 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
396 {
397         unsigned long flags;
398
399         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
400                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
401                                        TG3_64BIT_REG_LOW, val);
402                 return;
403         }
404         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
405                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
406                                        TG3_64BIT_REG_LOW, val);
407                 return;
408         }
409
410         spin_lock_irqsave(&tp->indirect_lock, flags);
411         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
412         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
413         spin_unlock_irqrestore(&tp->indirect_lock, flags);
414
415         /* In indirect mode when disabling interrupts, we also need
416          * to clear the interrupt bit in the GRC local ctrl register.
417          */
418         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
419             (val == 0x1)) {
420                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
421                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
422         }
423 }
424
425 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
426 {
427         unsigned long flags;
428         u32 val;
429
430         spin_lock_irqsave(&tp->indirect_lock, flags);
431         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
432         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
433         spin_unlock_irqrestore(&tp->indirect_lock, flags);
434         return val;
435 }
436
437 /* usec_wait specifies the wait time in usec when writing to certain registers
438  * where it is unsafe to read back the register without some delay.
439  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
440  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
441  */
442 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
443 {
444         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
445             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
446                 /* Non-posted methods */
447                 tp->write32(tp, off, val);
448         else {
449                 /* Posted method */
450                 tg3_write32(tp, off, val);
451                 if (usec_wait)
452                         udelay(usec_wait);
453                 tp->read32(tp, off);
454         }
455         /* Wait again after the read for the posted method to guarantee that
456          * the wait time is met.
457          */
458         if (usec_wait)
459                 udelay(usec_wait);
460 }
461
462 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
463 {
464         tp->write32_mbox(tp, off, val);
465         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
466             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
467                 tp->read32_mbox(tp, off);
468 }
469
470 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
471 {
472         void __iomem *mbox = tp->regs + off;
473         writel(val, mbox);
474         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
475                 writel(val, mbox);
476         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
477                 readl(mbox);
478 }
479
480 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
481 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
482 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
483 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
484 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
485
486 #define tw32(reg,val)           tp->write32(tp, reg, val)
487 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
488 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
489 #define tr32(reg)               tp->read32(tp, reg)
490
491 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
492 {
493         unsigned long flags;
494
495         spin_lock_irqsave(&tp->indirect_lock, flags);
496         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
497         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
498
499         /* Always leave this as zero. */
500         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
501         spin_unlock_irqrestore(&tp->indirect_lock, flags);
502 }
503
504 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
505 {
506         /* If no workaround is needed, write to mem space directly */
507         if (tp->write32 != tg3_write_indirect_reg32)
508                 tw32(NIC_SRAM_WIN_BASE + off, val);
509         else
510                 tg3_write_mem(tp, off, val);
511 }
512
513 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
514 {
515         unsigned long flags;
516
517         spin_lock_irqsave(&tp->indirect_lock, flags);
518         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
519         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
520
521         /* Always leave this as zero. */
522         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
523         spin_unlock_irqrestore(&tp->indirect_lock, flags);
524 }
525
526 static void tg3_disable_ints(struct tg3 *tp)
527 {
528         tw32(TG3PCI_MISC_HOST_CTRL,
529              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
530         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
531 }
532
533 static inline void tg3_cond_int(struct tg3 *tp)
534 {
535         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
536             (tp->hw_status->status & SD_STATUS_UPDATED))
537                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
538 }
539
540 static void tg3_enable_ints(struct tg3 *tp)
541 {
542         tp->irq_sync = 0;
543         wmb();
544
545         tw32(TG3PCI_MISC_HOST_CTRL,
546              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
547         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
548                        (tp->last_tag << 24));
549         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
550                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
551                                (tp->last_tag << 24));
552         tg3_cond_int(tp);
553 }
554
555 static inline unsigned int tg3_has_work(struct tg3 *tp)
556 {
557         struct tg3_hw_status *sblk = tp->hw_status;
558         unsigned int work_exists = 0;
559
560         /* check for phy events */
561         if (!(tp->tg3_flags &
562               (TG3_FLAG_USE_LINKCHG_REG |
563                TG3_FLAG_POLL_SERDES))) {
564                 if (sblk->status & SD_STATUS_LINK_CHG)
565                         work_exists = 1;
566         }
567         /* check for RX/TX work to do */
568         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
569             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
570                 work_exists = 1;
571
572         return work_exists;
573 }
574
575 /* tg3_restart_ints
576  *  similar to tg3_enable_ints, but it accurately determines whether there
577  *  is new work pending and can return without flushing the PIO write
578  *  which reenables interrupts 
579  */
580 static void tg3_restart_ints(struct tg3 *tp)
581 {
582         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
583                      tp->last_tag << 24);
584         mmiowb();
585
586         /* When doing tagged status, this work check is unnecessary.
587          * The last_tag we write above tells the chip which piece of
588          * work we've completed.
589          */
590         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
591             tg3_has_work(tp))
592                 tw32(HOSTCC_MODE, tp->coalesce_mode |
593                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
594 }
595
596 static inline void tg3_netif_stop(struct tg3 *tp)
597 {
598         tp->dev->trans_start = jiffies; /* prevent tx timeout */
599         netif_poll_disable(tp->dev);
600         netif_tx_disable(tp->dev);
601 }
602
603 static inline void tg3_netif_start(struct tg3 *tp)
604 {
605         netif_wake_queue(tp->dev);
606         /* NOTE: unconditional netif_wake_queue is only appropriate
607          * so long as all callers are assured to have free tx slots
608          * (such as after tg3_init_hw)
609          */
610         netif_poll_enable(tp->dev);
611         tp->hw_status->status |= SD_STATUS_UPDATED;
612         tg3_enable_ints(tp);
613 }
614
615 static void tg3_switch_clocks(struct tg3 *tp)
616 {
617         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
618         u32 orig_clock_ctrl;
619
620         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
621                 return;
622
623         orig_clock_ctrl = clock_ctrl;
624         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
625                        CLOCK_CTRL_CLKRUN_OENABLE |
626                        0x1f);
627         tp->pci_clock_ctrl = clock_ctrl;
628
629         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
630                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
631                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
632                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
633                 }
634         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
635                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
636                             clock_ctrl |
637                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
638                             40);
639                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
640                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
641                             40);
642         }
643         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
644 }
645
646 #define PHY_BUSY_LOOPS  5000
647
648 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
649 {
650         u32 frame_val;
651         unsigned int loops;
652         int ret;
653
654         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
655                 tw32_f(MAC_MI_MODE,
656                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
657                 udelay(80);
658         }
659
660         *val = 0x0;
661
662         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
663                       MI_COM_PHY_ADDR_MASK);
664         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
665                       MI_COM_REG_ADDR_MASK);
666         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
667         
668         tw32_f(MAC_MI_COM, frame_val);
669
670         loops = PHY_BUSY_LOOPS;
671         while (loops != 0) {
672                 udelay(10);
673                 frame_val = tr32(MAC_MI_COM);
674
675                 if ((frame_val & MI_COM_BUSY) == 0) {
676                         udelay(5);
677                         frame_val = tr32(MAC_MI_COM);
678                         break;
679                 }
680                 loops -= 1;
681         }
682
683         ret = -EBUSY;
684         if (loops != 0) {
685                 *val = frame_val & MI_COM_DATA_MASK;
686                 ret = 0;
687         }
688
689         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
690                 tw32_f(MAC_MI_MODE, tp->mi_mode);
691                 udelay(80);
692         }
693
694         return ret;
695 }
696
697 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
698 {
699         u32 frame_val;
700         unsigned int loops;
701         int ret;
702
703         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
704                 tw32_f(MAC_MI_MODE,
705                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
706                 udelay(80);
707         }
708
709         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
710                       MI_COM_PHY_ADDR_MASK);
711         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
712                       MI_COM_REG_ADDR_MASK);
713         frame_val |= (val & MI_COM_DATA_MASK);
714         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
715         
716         tw32_f(MAC_MI_COM, frame_val);
717
718         loops = PHY_BUSY_LOOPS;
719         while (loops != 0) {
720                 udelay(10);
721                 frame_val = tr32(MAC_MI_COM);
722                 if ((frame_val & MI_COM_BUSY) == 0) {
723                         udelay(5);
724                         frame_val = tr32(MAC_MI_COM);
725                         break;
726                 }
727                 loops -= 1;
728         }
729
730         ret = -EBUSY;
731         if (loops != 0)
732                 ret = 0;
733
734         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
735                 tw32_f(MAC_MI_MODE, tp->mi_mode);
736                 udelay(80);
737         }
738
739         return ret;
740 }
741
742 static void tg3_phy_set_wirespeed(struct tg3 *tp)
743 {
744         u32 val;
745
746         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
747                 return;
748
749         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
750             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
751                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
752                              (val | (1 << 15) | (1 << 4)));
753 }
754
755 static int tg3_bmcr_reset(struct tg3 *tp)
756 {
757         u32 phy_control;
758         int limit, err;
759
760         /* OK, reset it, and poll the BMCR_RESET bit until it
761          * clears or we time out.
762          */
763         phy_control = BMCR_RESET;
764         err = tg3_writephy(tp, MII_BMCR, phy_control);
765         if (err != 0)
766                 return -EBUSY;
767
768         limit = 5000;
769         while (limit--) {
770                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
771                 if (err != 0)
772                         return -EBUSY;
773
774                 if ((phy_control & BMCR_RESET) == 0) {
775                         udelay(40);
776                         break;
777                 }
778                 udelay(10);
779         }
780         if (limit <= 0)
781                 return -EBUSY;
782
783         return 0;
784 }
785
786 static int tg3_wait_macro_done(struct tg3 *tp)
787 {
788         int limit = 100;
789
790         while (limit--) {
791                 u32 tmp32;
792
793                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
794                         if ((tmp32 & 0x1000) == 0)
795                                 break;
796                 }
797         }
798         if (limit <= 0)
799                 return -EBUSY;
800
801         return 0;
802 }
803
804 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
805 {
806         static const u32 test_pat[4][6] = {
807         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
808         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
809         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
810         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
811         };
812         int chan;
813
814         for (chan = 0; chan < 4; chan++) {
815                 int i;
816
817                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
818                              (chan * 0x2000) | 0x0200);
819                 tg3_writephy(tp, 0x16, 0x0002);
820
821                 for (i = 0; i < 6; i++)
822                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
823                                      test_pat[chan][i]);
824
825                 tg3_writephy(tp, 0x16, 0x0202);
826                 if (tg3_wait_macro_done(tp)) {
827                         *resetp = 1;
828                         return -EBUSY;
829                 }
830
831                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
832                              (chan * 0x2000) | 0x0200);
833                 tg3_writephy(tp, 0x16, 0x0082);
834                 if (tg3_wait_macro_done(tp)) {
835                         *resetp = 1;
836                         return -EBUSY;
837                 }
838
839                 tg3_writephy(tp, 0x16, 0x0802);
840                 if (tg3_wait_macro_done(tp)) {
841                         *resetp = 1;
842                         return -EBUSY;
843                 }
844
845                 for (i = 0; i < 6; i += 2) {
846                         u32 low, high;
847
848                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
849                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
850                             tg3_wait_macro_done(tp)) {
851                                 *resetp = 1;
852                                 return -EBUSY;
853                         }
854                         low &= 0x7fff;
855                         high &= 0x000f;
856                         if (low != test_pat[chan][i] ||
857                             high != test_pat[chan][i+1]) {
858                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
859                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
860                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
861
862                                 return -EBUSY;
863                         }
864                 }
865         }
866
867         return 0;
868 }
869
870 static int tg3_phy_reset_chanpat(struct tg3 *tp)
871 {
872         int chan;
873
874         for (chan = 0; chan < 4; chan++) {
875                 int i;
876
877                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
878                              (chan * 0x2000) | 0x0200);
879                 tg3_writephy(tp, 0x16, 0x0002);
880                 for (i = 0; i < 6; i++)
881                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
882                 tg3_writephy(tp, 0x16, 0x0202);
883                 if (tg3_wait_macro_done(tp))
884                         return -EBUSY;
885         }
886
887         return 0;
888 }
889
890 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
891 {
892         u32 reg32, phy9_orig;
893         int retries, do_phy_reset, err;
894
895         retries = 10;
896         do_phy_reset = 1;
897         do {
898                 if (do_phy_reset) {
899                         err = tg3_bmcr_reset(tp);
900                         if (err)
901                                 return err;
902                         do_phy_reset = 0;
903                 }
904
905                 /* Disable transmitter and interrupt.  */
906                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
907                         continue;
908
909                 reg32 |= 0x3000;
910                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
911
912                 /* Set full-duplex, 1000 mbps.  */
913                 tg3_writephy(tp, MII_BMCR,
914                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
915
916                 /* Set to master mode.  */
917                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
918                         continue;
919
920                 tg3_writephy(tp, MII_TG3_CTRL,
921                              (MII_TG3_CTRL_AS_MASTER |
922                               MII_TG3_CTRL_ENABLE_AS_MASTER));
923
924                 /* Enable SM_DSP_CLOCK and 6dB.  */
925                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
926
927                 /* Block the PHY control access.  */
928                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
929                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
930
931                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
932                 if (!err)
933                         break;
934         } while (--retries);
935
936         err = tg3_phy_reset_chanpat(tp);
937         if (err)
938                 return err;
939
940         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
941         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
942
943         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
944         tg3_writephy(tp, 0x16, 0x0000);
945
946         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
947             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
948                 /* Set Extended packet length bit for jumbo frames */
949                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
950         }
951         else {
952                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
953         }
954
955         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
956
957         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
958                 reg32 &= ~0x3000;
959                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
960         } else if (!err)
961                 err = -EBUSY;
962
963         return err;
964 }
965
966 /* This will reset the tigon3 PHY if there is no valid
967  * link unless the FORCE argument is non-zero.
968  */
969 static int tg3_phy_reset(struct tg3 *tp)
970 {
971         u32 phy_status;
972         int err;
973
974         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
975         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
976         if (err != 0)
977                 return -EBUSY;
978
979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
980             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
981             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
982                 err = tg3_phy_reset_5703_4_5(tp);
983                 if (err)
984                         return err;
985                 goto out;
986         }
987
988         err = tg3_bmcr_reset(tp);
989         if (err)
990                 return err;
991
992 out:
993         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
994                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
995                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
996                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
997                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
998                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
999                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1000         }
1001         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1002                 tg3_writephy(tp, 0x1c, 0x8d68);
1003                 tg3_writephy(tp, 0x1c, 0x8d68);
1004         }
1005         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1006                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1007                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1008                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1009                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1010                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1011                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1012                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1013                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1014         }
1015         /* Set Extended packet length bit (bit 14) on all chips that */
1016         /* support jumbo frames */
1017         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1018                 /* Cannot do read-modify-write on 5401 */
1019                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1020         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1021                 u32 phy_reg;
1022
1023                 /* Set bit 14 with read-modify-write to preserve other bits */
1024                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1025                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1026                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1027         }
1028
1029         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1030          * jumbo frames transmission.
1031          */
1032         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1033                 u32 phy_reg;
1034
1035                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1036                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1037                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1038         }
1039
1040         tg3_phy_set_wirespeed(tp);
1041         return 0;
1042 }
1043
1044 static void tg3_frob_aux_power(struct tg3 *tp)
1045 {
1046         struct tg3 *tp_peer = tp;
1047
1048         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1049                 return;
1050
1051         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1052             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1053                 struct net_device *dev_peer;
1054
1055                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1056                 /* remove_one() may have been run on the peer. */
1057                 if (!dev_peer)
1058                         tp_peer = tp;
1059                 else
1060                         tp_peer = netdev_priv(dev_peer);
1061         }
1062
1063         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1064             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1065             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1066             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1067                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1068                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1069                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1070                                     (GRC_LCLCTRL_GPIO_OE0 |
1071                                      GRC_LCLCTRL_GPIO_OE1 |
1072                                      GRC_LCLCTRL_GPIO_OE2 |
1073                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1074                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1075                                     100);
1076                 } else {
1077                         u32 no_gpio2;
1078                         u32 grc_local_ctrl = 0;
1079
1080                         if (tp_peer != tp &&
1081                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1082                                 return;
1083
1084                         /* Workaround to prevent overdrawing Amps. */
1085                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1086                             ASIC_REV_5714) {
1087                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1088                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1089                                             grc_local_ctrl, 100);
1090                         }
1091
1092                         /* On 5753 and variants, GPIO2 cannot be used. */
1093                         no_gpio2 = tp->nic_sram_data_cfg &
1094                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1095
1096                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1097                                          GRC_LCLCTRL_GPIO_OE1 |
1098                                          GRC_LCLCTRL_GPIO_OE2 |
1099                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1100                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1101                         if (no_gpio2) {
1102                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1103                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1104                         }
1105                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1106                                                     grc_local_ctrl, 100);
1107
1108                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1109
1110                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1111                                                     grc_local_ctrl, 100);
1112
1113                         if (!no_gpio2) {
1114                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1115                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1116                                             grc_local_ctrl, 100);
1117                         }
1118                 }
1119         } else {
1120                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1121                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1122                         if (tp_peer != tp &&
1123                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1124                                 return;
1125
1126                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1127                                     (GRC_LCLCTRL_GPIO_OE1 |
1128                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1129
1130                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131                                     GRC_LCLCTRL_GPIO_OE1, 100);
1132
1133                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1134                                     (GRC_LCLCTRL_GPIO_OE1 |
1135                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1136                 }
1137         }
1138 }
1139
1140 static int tg3_setup_phy(struct tg3 *, int);
1141
1142 #define RESET_KIND_SHUTDOWN     0
1143 #define RESET_KIND_INIT         1
1144 #define RESET_KIND_SUSPEND      2
1145
1146 static void tg3_write_sig_post_reset(struct tg3 *, int);
1147 static int tg3_halt_cpu(struct tg3 *, u32);
1148 static int tg3_nvram_lock(struct tg3 *);
1149 static void tg3_nvram_unlock(struct tg3 *);
1150
1151 static void tg3_power_down_phy(struct tg3 *tp)
1152 {
1153         /* The PHY should not be powered down on some chips because
1154          * of bugs.
1155          */
1156         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1157             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1158             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1159              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1160                 return;
1161         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1162 }
1163
1164 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1165 {
1166         u32 misc_host_ctrl;
1167         u16 power_control, power_caps;
1168         int pm = tp->pm_cap;
1169
1170         /* Make sure register accesses (indirect or otherwise)
1171          * will function correctly.
1172          */
1173         pci_write_config_dword(tp->pdev,
1174                                TG3PCI_MISC_HOST_CTRL,
1175                                tp->misc_host_ctrl);
1176
1177         pci_read_config_word(tp->pdev,
1178                              pm + PCI_PM_CTRL,
1179                              &power_control);
1180         power_control |= PCI_PM_CTRL_PME_STATUS;
1181         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1182         switch (state) {
1183         case PCI_D0:
1184                 power_control |= 0;
1185                 pci_write_config_word(tp->pdev,
1186                                       pm + PCI_PM_CTRL,
1187                                       power_control);
1188                 udelay(100);    /* Delay after power state change */
1189
1190                 /* Switch out of Vaux if it is not a LOM */
1191                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1192                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1193
1194                 return 0;
1195
1196         case PCI_D1:
1197                 power_control |= 1;
1198                 break;
1199
1200         case PCI_D2:
1201                 power_control |= 2;
1202                 break;
1203
1204         case PCI_D3hot:
1205                 power_control |= 3;
1206                 break;
1207
1208         default:
1209                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1210                        "requested.\n",
1211                        tp->dev->name, state);
1212                 return -EINVAL;
1213         };
1214
1215         power_control |= PCI_PM_CTRL_PME_ENABLE;
1216
1217         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1218         tw32(TG3PCI_MISC_HOST_CTRL,
1219              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1220
1221         if (tp->link_config.phy_is_low_power == 0) {
1222                 tp->link_config.phy_is_low_power = 1;
1223                 tp->link_config.orig_speed = tp->link_config.speed;
1224                 tp->link_config.orig_duplex = tp->link_config.duplex;
1225                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1226         }
1227
1228         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1229                 tp->link_config.speed = SPEED_10;
1230                 tp->link_config.duplex = DUPLEX_HALF;
1231                 tp->link_config.autoneg = AUTONEG_ENABLE;
1232                 tg3_setup_phy(tp, 0);
1233         }
1234
1235         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1236                 int i;
1237                 u32 val;
1238
1239                 for (i = 0; i < 200; i++) {
1240                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1241                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1242                                 break;
1243                         msleep(1);
1244                 }
1245         }
1246         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1247                                              WOL_DRV_STATE_SHUTDOWN |
1248                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1249
1250         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1251
1252         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1253                 u32 mac_mode;
1254
1255                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1256                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1257                         udelay(40);
1258
1259                         mac_mode = MAC_MODE_PORT_MODE_MII;
1260
1261                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1262                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1263                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1264                 } else {
1265                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1266                 }
1267
1268                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1269                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1270
1271                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1272                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1273                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1274
1275                 tw32_f(MAC_MODE, mac_mode);
1276                 udelay(100);
1277
1278                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1279                 udelay(10);
1280         }
1281
1282         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1283             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1284              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1285                 u32 base_val;
1286
1287                 base_val = tp->pci_clock_ctrl;
1288                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1289                              CLOCK_CTRL_TXCLK_DISABLE);
1290
1291                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1292                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1293         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1294                 /* do nothing */
1295         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1296                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1297                 u32 newbits1, newbits2;
1298
1299                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1300                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1301                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1302                                     CLOCK_CTRL_TXCLK_DISABLE |
1303                                     CLOCK_CTRL_ALTCLK);
1304                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1305                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1306                         newbits1 = CLOCK_CTRL_625_CORE;
1307                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1308                 } else {
1309                         newbits1 = CLOCK_CTRL_ALTCLK;
1310                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1311                 }
1312
1313                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1314                             40);
1315
1316                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1317                             40);
1318
1319                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1320                         u32 newbits3;
1321
1322                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1323                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1324                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1325                                             CLOCK_CTRL_TXCLK_DISABLE |
1326                                             CLOCK_CTRL_44MHZ_CORE);
1327                         } else {
1328                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1329                         }
1330
1331                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1332                                     tp->pci_clock_ctrl | newbits3, 40);
1333                 }
1334         }
1335
1336         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1337             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1338                 /* Turn off the PHY */
1339                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1340                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1341                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1342                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1343                         tg3_power_down_phy(tp);
1344                 }
1345         }
1346
1347         tg3_frob_aux_power(tp);
1348
1349         /* Workaround for unstable PLL clock */
1350         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1351             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1352                 u32 val = tr32(0x7d00);
1353
1354                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1355                 tw32(0x7d00, val);
1356                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1357                         int err;
1358
1359                         err = tg3_nvram_lock(tp);
1360                         tg3_halt_cpu(tp, RX_CPU_BASE);
1361                         if (!err)
1362                                 tg3_nvram_unlock(tp);
1363                 }
1364         }
1365
1366         /* Finally, set the new power state. */
1367         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1368         udelay(100);    /* Delay after power state change */
1369
1370         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1371
1372         return 0;
1373 }
1374
1375 static void tg3_link_report(struct tg3 *tp)
1376 {
1377         if (!netif_carrier_ok(tp->dev)) {
1378                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1379         } else {
1380                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1381                        tp->dev->name,
1382                        (tp->link_config.active_speed == SPEED_1000 ?
1383                         1000 :
1384                         (tp->link_config.active_speed == SPEED_100 ?
1385                          100 : 10)),
1386                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1387                         "full" : "half"));
1388
1389                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1390                        "%s for RX.\n",
1391                        tp->dev->name,
1392                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1393                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1394         }
1395 }
1396
1397 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1398 {
1399         u32 new_tg3_flags = 0;
1400         u32 old_rx_mode = tp->rx_mode;
1401         u32 old_tx_mode = tp->tx_mode;
1402
1403         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1404
1405                 /* Convert 1000BaseX flow control bits to 1000BaseT
1406                  * bits before resolving flow control.
1407                  */
1408                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1409                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1410                                        ADVERTISE_PAUSE_ASYM);
1411                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1412
1413                         if (local_adv & ADVERTISE_1000XPAUSE)
1414                                 local_adv |= ADVERTISE_PAUSE_CAP;
1415                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1416                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1417                         if (remote_adv & LPA_1000XPAUSE)
1418                                 remote_adv |= LPA_PAUSE_CAP;
1419                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1420                                 remote_adv |= LPA_PAUSE_ASYM;
1421                 }
1422
1423                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1424                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1425                                 if (remote_adv & LPA_PAUSE_CAP)
1426                                         new_tg3_flags |=
1427                                                 (TG3_FLAG_RX_PAUSE |
1428                                                 TG3_FLAG_TX_PAUSE);
1429                                 else if (remote_adv & LPA_PAUSE_ASYM)
1430                                         new_tg3_flags |=
1431                                                 (TG3_FLAG_RX_PAUSE);
1432                         } else {
1433                                 if (remote_adv & LPA_PAUSE_CAP)
1434                                         new_tg3_flags |=
1435                                                 (TG3_FLAG_RX_PAUSE |
1436                                                 TG3_FLAG_TX_PAUSE);
1437                         }
1438                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1439                         if ((remote_adv & LPA_PAUSE_CAP) &&
1440                         (remote_adv & LPA_PAUSE_ASYM))
1441                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1442                 }
1443
1444                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1445                 tp->tg3_flags |= new_tg3_flags;
1446         } else {
1447                 new_tg3_flags = tp->tg3_flags;
1448         }
1449
1450         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1451                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1452         else
1453                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1454
1455         if (old_rx_mode != tp->rx_mode) {
1456                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1457         }
1458         
1459         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1460                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1461         else
1462                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1463
1464         if (old_tx_mode != tp->tx_mode) {
1465                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1466         }
1467 }
1468
1469 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1470 {
1471         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1472         case MII_TG3_AUX_STAT_10HALF:
1473                 *speed = SPEED_10;
1474                 *duplex = DUPLEX_HALF;
1475                 break;
1476
1477         case MII_TG3_AUX_STAT_10FULL:
1478                 *speed = SPEED_10;
1479                 *duplex = DUPLEX_FULL;
1480                 break;
1481
1482         case MII_TG3_AUX_STAT_100HALF:
1483                 *speed = SPEED_100;
1484                 *duplex = DUPLEX_HALF;
1485                 break;
1486
1487         case MII_TG3_AUX_STAT_100FULL:
1488                 *speed = SPEED_100;
1489                 *duplex = DUPLEX_FULL;
1490                 break;
1491
1492         case MII_TG3_AUX_STAT_1000HALF:
1493                 *speed = SPEED_1000;
1494                 *duplex = DUPLEX_HALF;
1495                 break;
1496
1497         case MII_TG3_AUX_STAT_1000FULL:
1498                 *speed = SPEED_1000;
1499                 *duplex = DUPLEX_FULL;
1500                 break;
1501
1502         default:
1503                 *speed = SPEED_INVALID;
1504                 *duplex = DUPLEX_INVALID;
1505                 break;
1506         };
1507 }
1508
1509 static void tg3_phy_copper_begin(struct tg3 *tp)
1510 {
1511         u32 new_adv;
1512         int i;
1513
1514         if (tp->link_config.phy_is_low_power) {
1515                 /* Entering low power mode.  Disable gigabit and
1516                  * 100baseT advertisements.
1517                  */
1518                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1519
1520                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1521                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1522                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1523                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1524
1525                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1526         } else if (tp->link_config.speed == SPEED_INVALID) {
1527                 tp->link_config.advertising =
1528                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1529                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1530                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1531                          ADVERTISED_Autoneg | ADVERTISED_MII);
1532
1533                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1534                         tp->link_config.advertising &=
1535                                 ~(ADVERTISED_1000baseT_Half |
1536                                   ADVERTISED_1000baseT_Full);
1537
1538                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1539                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1540                         new_adv |= ADVERTISE_10HALF;
1541                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1542                         new_adv |= ADVERTISE_10FULL;
1543                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1544                         new_adv |= ADVERTISE_100HALF;
1545                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1546                         new_adv |= ADVERTISE_100FULL;
1547                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1548
1549                 if (tp->link_config.advertising &
1550                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1551                         new_adv = 0;
1552                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1553                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1554                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1555                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1556                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1557                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1558                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1559                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1560                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1561                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1562                 } else {
1563                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1564                 }
1565         } else {
1566                 /* Asking for a specific link mode. */
1567                 if (tp->link_config.speed == SPEED_1000) {
1568                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1569                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1570
1571                         if (tp->link_config.duplex == DUPLEX_FULL)
1572                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1573                         else
1574                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1575                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1576                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1577                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1578                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1579                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1580                 } else {
1581                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1582
1583                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1584                         if (tp->link_config.speed == SPEED_100) {
1585                                 if (tp->link_config.duplex == DUPLEX_FULL)
1586                                         new_adv |= ADVERTISE_100FULL;
1587                                 else
1588                                         new_adv |= ADVERTISE_100HALF;
1589                         } else {
1590                                 if (tp->link_config.duplex == DUPLEX_FULL)
1591                                         new_adv |= ADVERTISE_10FULL;
1592                                 else
1593                                         new_adv |= ADVERTISE_10HALF;
1594                         }
1595                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1596                 }
1597         }
1598
1599         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1600             tp->link_config.speed != SPEED_INVALID) {
1601                 u32 bmcr, orig_bmcr;
1602
1603                 tp->link_config.active_speed = tp->link_config.speed;
1604                 tp->link_config.active_duplex = tp->link_config.duplex;
1605
1606                 bmcr = 0;
1607                 switch (tp->link_config.speed) {
1608                 default:
1609                 case SPEED_10:
1610                         break;
1611
1612                 case SPEED_100:
1613                         bmcr |= BMCR_SPEED100;
1614                         break;
1615
1616                 case SPEED_1000:
1617                         bmcr |= TG3_BMCR_SPEED1000;
1618                         break;
1619                 };
1620
1621                 if (tp->link_config.duplex == DUPLEX_FULL)
1622                         bmcr |= BMCR_FULLDPLX;
1623
1624                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1625                     (bmcr != orig_bmcr)) {
1626                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1627                         for (i = 0; i < 1500; i++) {
1628                                 u32 tmp;
1629
1630                                 udelay(10);
1631                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1632                                     tg3_readphy(tp, MII_BMSR, &tmp))
1633                                         continue;
1634                                 if (!(tmp & BMSR_LSTATUS)) {
1635                                         udelay(40);
1636                                         break;
1637                                 }
1638                         }
1639                         tg3_writephy(tp, MII_BMCR, bmcr);
1640                         udelay(40);
1641                 }
1642         } else {
1643                 tg3_writephy(tp, MII_BMCR,
1644                              BMCR_ANENABLE | BMCR_ANRESTART);
1645         }
1646 }
1647
1648 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1649 {
1650         int err;
1651
1652         /* Turn off tap power management. */
1653         /* Set Extended packet length bit */
1654         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1655
1656         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1657         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1658
1659         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1660         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1661
1662         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1663         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1664
1665         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1666         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1667
1668         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1669         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1670
1671         udelay(40);
1672
1673         return err;
1674 }
1675
1676 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1677 {
1678         u32 adv_reg, all_mask;
1679
1680         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1681                 return 0;
1682
1683         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1684                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1685         if ((adv_reg & all_mask) != all_mask)
1686                 return 0;
1687         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1688                 u32 tg3_ctrl;
1689
1690                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1691                         return 0;
1692
1693                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1694                             MII_TG3_CTRL_ADV_1000_FULL);
1695                 if ((tg3_ctrl & all_mask) != all_mask)
1696                         return 0;
1697         }
1698         return 1;
1699 }
1700
1701 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1702 {
1703         int current_link_up;
1704         u32 bmsr, dummy;
1705         u16 current_speed;
1706         u8 current_duplex;
1707         int i, err;
1708
1709         tw32(MAC_EVENT, 0);
1710
1711         tw32_f(MAC_STATUS,
1712              (MAC_STATUS_SYNC_CHANGED |
1713               MAC_STATUS_CFG_CHANGED |
1714               MAC_STATUS_MI_COMPLETION |
1715               MAC_STATUS_LNKSTATE_CHANGED));
1716         udelay(40);
1717
1718         tp->mi_mode = MAC_MI_MODE_BASE;
1719         tw32_f(MAC_MI_MODE, tp->mi_mode);
1720         udelay(80);
1721
1722         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1723
1724         /* Some third-party PHYs need to be reset on link going
1725          * down.
1726          */
1727         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1728              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1729              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1730             netif_carrier_ok(tp->dev)) {
1731                 tg3_readphy(tp, MII_BMSR, &bmsr);
1732                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1733                     !(bmsr & BMSR_LSTATUS))
1734                         force_reset = 1;
1735         }
1736         if (force_reset)
1737                 tg3_phy_reset(tp);
1738
1739         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1740                 tg3_readphy(tp, MII_BMSR, &bmsr);
1741                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1742                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1743                         bmsr = 0;
1744
1745                 if (!(bmsr & BMSR_LSTATUS)) {
1746                         err = tg3_init_5401phy_dsp(tp);
1747                         if (err)
1748                                 return err;
1749
1750                         tg3_readphy(tp, MII_BMSR, &bmsr);
1751                         for (i = 0; i < 1000; i++) {
1752                                 udelay(10);
1753                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1754                                     (bmsr & BMSR_LSTATUS)) {
1755                                         udelay(40);
1756                                         break;
1757                                 }
1758                         }
1759
1760                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1761                             !(bmsr & BMSR_LSTATUS) &&
1762                             tp->link_config.active_speed == SPEED_1000) {
1763                                 err = tg3_phy_reset(tp);
1764                                 if (!err)
1765                                         err = tg3_init_5401phy_dsp(tp);
1766                                 if (err)
1767                                         return err;
1768                         }
1769                 }
1770         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1771                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1772                 /* 5701 {A0,B0} CRC bug workaround */
1773                 tg3_writephy(tp, 0x15, 0x0a75);
1774                 tg3_writephy(tp, 0x1c, 0x8c68);
1775                 tg3_writephy(tp, 0x1c, 0x8d68);
1776                 tg3_writephy(tp, 0x1c, 0x8c68);
1777         }
1778
1779         /* Clear pending interrupts... */
1780         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1781         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1782
1783         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1784                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1785         else
1786                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1787
1788         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1789             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1790                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1791                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1792                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1793                 else
1794                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1795         }
1796
1797         current_link_up = 0;
1798         current_speed = SPEED_INVALID;
1799         current_duplex = DUPLEX_INVALID;
1800
1801         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1802                 u32 val;
1803
1804                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1805                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1806                 if (!(val & (1 << 10))) {
1807                         val |= (1 << 10);
1808                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1809                         goto relink;
1810                 }
1811         }
1812
1813         bmsr = 0;
1814         for (i = 0; i < 100; i++) {
1815                 tg3_readphy(tp, MII_BMSR, &bmsr);
1816                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1817                     (bmsr & BMSR_LSTATUS))
1818                         break;
1819                 udelay(40);
1820         }
1821
1822         if (bmsr & BMSR_LSTATUS) {
1823                 u32 aux_stat, bmcr;
1824
1825                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1826                 for (i = 0; i < 2000; i++) {
1827                         udelay(10);
1828                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1829                             aux_stat)
1830                                 break;
1831                 }
1832
1833                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1834                                              &current_speed,
1835                                              &current_duplex);
1836
1837                 bmcr = 0;
1838                 for (i = 0; i < 200; i++) {
1839                         tg3_readphy(tp, MII_BMCR, &bmcr);
1840                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1841                                 continue;
1842                         if (bmcr && bmcr != 0x7fff)
1843                                 break;
1844                         udelay(10);
1845                 }
1846
1847                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1848                         if (bmcr & BMCR_ANENABLE) {
1849                                 current_link_up = 1;
1850
1851                                 /* Force autoneg restart if we are exiting
1852                                  * low power mode.
1853                                  */
1854                                 if (!tg3_copper_is_advertising_all(tp))
1855                                         current_link_up = 0;
1856                         } else {
1857                                 current_link_up = 0;
1858                         }
1859                 } else {
1860                         if (!(bmcr & BMCR_ANENABLE) &&
1861                             tp->link_config.speed == current_speed &&
1862                             tp->link_config.duplex == current_duplex) {
1863                                 current_link_up = 1;
1864                         } else {
1865                                 current_link_up = 0;
1866                         }
1867                 }
1868
1869                 tp->link_config.active_speed = current_speed;
1870                 tp->link_config.active_duplex = current_duplex;
1871         }
1872
1873         if (current_link_up == 1 &&
1874             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1875             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1876                 u32 local_adv, remote_adv;
1877
1878                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1879                         local_adv = 0;
1880                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1881
1882                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1883                         remote_adv = 0;
1884
1885                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1886
1887                 /* If we are not advertising full pause capability,
1888                  * something is wrong.  Bring the link down and reconfigure.
1889                  */
1890                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1891                         current_link_up = 0;
1892                 } else {
1893                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1894                 }
1895         }
1896 relink:
1897         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1898                 u32 tmp;
1899
1900                 tg3_phy_copper_begin(tp);
1901
1902                 tg3_readphy(tp, MII_BMSR, &tmp);
1903                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1904                     (tmp & BMSR_LSTATUS))
1905                         current_link_up = 1;
1906         }
1907
1908         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1909         if (current_link_up == 1) {
1910                 if (tp->link_config.active_speed == SPEED_100 ||
1911                     tp->link_config.active_speed == SPEED_10)
1912                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1913                 else
1914                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1915         } else
1916                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1917
1918         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1919         if (tp->link_config.active_duplex == DUPLEX_HALF)
1920                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1921
1922         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1924                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1925                     (current_link_up == 1 &&
1926                      tp->link_config.active_speed == SPEED_10))
1927                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1928         } else {
1929                 if (current_link_up == 1)
1930                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1931         }
1932
1933         /* ??? Without this setting Netgear GA302T PHY does not
1934          * ??? send/receive packets...
1935          */
1936         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1937             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1938                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1939                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1940                 udelay(80);
1941         }
1942
1943         tw32_f(MAC_MODE, tp->mac_mode);
1944         udelay(40);
1945
1946         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1947                 /* Polled via timer. */
1948                 tw32_f(MAC_EVENT, 0);
1949         } else {
1950                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1951         }
1952         udelay(40);
1953
1954         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1955             current_link_up == 1 &&
1956             tp->link_config.active_speed == SPEED_1000 &&
1957             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1958              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1959                 udelay(120);
1960                 tw32_f(MAC_STATUS,
1961                      (MAC_STATUS_SYNC_CHANGED |
1962                       MAC_STATUS_CFG_CHANGED));
1963                 udelay(40);
1964                 tg3_write_mem(tp,
1965                               NIC_SRAM_FIRMWARE_MBOX,
1966                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1967         }
1968
1969         if (current_link_up != netif_carrier_ok(tp->dev)) {
1970                 if (current_link_up)
1971                         netif_carrier_on(tp->dev);
1972                 else
1973                         netif_carrier_off(tp->dev);
1974                 tg3_link_report(tp);
1975         }
1976
1977         return 0;
1978 }
1979
1980 struct tg3_fiber_aneginfo {
1981         int state;
1982 #define ANEG_STATE_UNKNOWN              0
1983 #define ANEG_STATE_AN_ENABLE            1
1984 #define ANEG_STATE_RESTART_INIT         2
1985 #define ANEG_STATE_RESTART              3
1986 #define ANEG_STATE_DISABLE_LINK_OK      4
1987 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1988 #define ANEG_STATE_ABILITY_DETECT       6
1989 #define ANEG_STATE_ACK_DETECT_INIT      7
1990 #define ANEG_STATE_ACK_DETECT           8
1991 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1992 #define ANEG_STATE_COMPLETE_ACK         10
1993 #define ANEG_STATE_IDLE_DETECT_INIT     11
1994 #define ANEG_STATE_IDLE_DETECT          12
1995 #define ANEG_STATE_LINK_OK              13
1996 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1997 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1998
1999         u32 flags;
2000 #define MR_AN_ENABLE            0x00000001
2001 #define MR_RESTART_AN           0x00000002
2002 #define MR_AN_COMPLETE          0x00000004
2003 #define MR_PAGE_RX              0x00000008
2004 #define MR_NP_LOADED            0x00000010
2005 #define MR_TOGGLE_TX            0x00000020
2006 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2007 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2008 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2009 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2010 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2011 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2012 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2013 #define MR_TOGGLE_RX            0x00002000
2014 #define MR_NP_RX                0x00004000
2015
2016 #define MR_LINK_OK              0x80000000
2017
2018         unsigned long link_time, cur_time;
2019
2020         u32 ability_match_cfg;
2021         int ability_match_count;
2022
2023         char ability_match, idle_match, ack_match;
2024
2025         u32 txconfig, rxconfig;
2026 #define ANEG_CFG_NP             0x00000080
2027 #define ANEG_CFG_ACK            0x00000040
2028 #define ANEG_CFG_RF2            0x00000020
2029 #define ANEG_CFG_RF1            0x00000010
2030 #define ANEG_CFG_PS2            0x00000001
2031 #define ANEG_CFG_PS1            0x00008000
2032 #define ANEG_CFG_HD             0x00004000
2033 #define ANEG_CFG_FD             0x00002000
2034 #define ANEG_CFG_INVAL          0x00001f06
2035
2036 };
2037 #define ANEG_OK         0
2038 #define ANEG_DONE       1
2039 #define ANEG_TIMER_ENAB 2
2040 #define ANEG_FAILED     -1
2041
2042 #define ANEG_STATE_SETTLE_TIME  10000
2043
2044 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2045                                    struct tg3_fiber_aneginfo *ap)
2046 {
2047         unsigned long delta;
2048         u32 rx_cfg_reg;
2049         int ret;
2050
2051         if (ap->state == ANEG_STATE_UNKNOWN) {
2052                 ap->rxconfig = 0;
2053                 ap->link_time = 0;
2054                 ap->cur_time = 0;
2055                 ap->ability_match_cfg = 0;
2056                 ap->ability_match_count = 0;
2057                 ap->ability_match = 0;
2058                 ap->idle_match = 0;
2059                 ap->ack_match = 0;
2060         }
2061         ap->cur_time++;
2062
2063         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2064                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2065
2066                 if (rx_cfg_reg != ap->ability_match_cfg) {
2067                         ap->ability_match_cfg = rx_cfg_reg;
2068                         ap->ability_match = 0;
2069                         ap->ability_match_count = 0;
2070                 } else {
2071                         if (++ap->ability_match_count > 1) {
2072                                 ap->ability_match = 1;
2073                                 ap->ability_match_cfg = rx_cfg_reg;
2074                         }
2075                 }
2076                 if (rx_cfg_reg & ANEG_CFG_ACK)
2077                         ap->ack_match = 1;
2078                 else
2079                         ap->ack_match = 0;
2080
2081                 ap->idle_match = 0;
2082         } else {
2083                 ap->idle_match = 1;
2084                 ap->ability_match_cfg = 0;
2085                 ap->ability_match_count = 0;
2086                 ap->ability_match = 0;
2087                 ap->ack_match = 0;
2088
2089                 rx_cfg_reg = 0;
2090         }
2091
2092         ap->rxconfig = rx_cfg_reg;
2093         ret = ANEG_OK;
2094
2095         switch(ap->state) {
2096         case ANEG_STATE_UNKNOWN:
2097                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2098                         ap->state = ANEG_STATE_AN_ENABLE;
2099
2100                 /* fallthru */
2101         case ANEG_STATE_AN_ENABLE:
2102                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2103                 if (ap->flags & MR_AN_ENABLE) {
2104                         ap->link_time = 0;
2105                         ap->cur_time = 0;
2106                         ap->ability_match_cfg = 0;
2107                         ap->ability_match_count = 0;
2108                         ap->ability_match = 0;
2109                         ap->idle_match = 0;
2110                         ap->ack_match = 0;
2111
2112                         ap->state = ANEG_STATE_RESTART_INIT;
2113                 } else {
2114                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2115                 }
2116                 break;
2117
2118         case ANEG_STATE_RESTART_INIT:
2119                 ap->link_time = ap->cur_time;
2120                 ap->flags &= ~(MR_NP_LOADED);
2121                 ap->txconfig = 0;
2122                 tw32(MAC_TX_AUTO_NEG, 0);
2123                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2124                 tw32_f(MAC_MODE, tp->mac_mode);
2125                 udelay(40);
2126
2127                 ret = ANEG_TIMER_ENAB;
2128                 ap->state = ANEG_STATE_RESTART;
2129
2130                 /* fallthru */
2131         case ANEG_STATE_RESTART:
2132                 delta = ap->cur_time - ap->link_time;
2133                 if (delta > ANEG_STATE_SETTLE_TIME) {
2134                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2135                 } else {
2136                         ret = ANEG_TIMER_ENAB;
2137                 }
2138                 break;
2139
2140         case ANEG_STATE_DISABLE_LINK_OK:
2141                 ret = ANEG_DONE;
2142                 break;
2143
2144         case ANEG_STATE_ABILITY_DETECT_INIT:
2145                 ap->flags &= ~(MR_TOGGLE_TX);
2146                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2147                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2148                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2149                 tw32_f(MAC_MODE, tp->mac_mode);
2150                 udelay(40);
2151
2152                 ap->state = ANEG_STATE_ABILITY_DETECT;
2153                 break;
2154
2155         case ANEG_STATE_ABILITY_DETECT:
2156                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2157                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2158                 }
2159                 break;
2160
2161         case ANEG_STATE_ACK_DETECT_INIT:
2162                 ap->txconfig |= ANEG_CFG_ACK;
2163                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2164                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2165                 tw32_f(MAC_MODE, tp->mac_mode);
2166                 udelay(40);
2167
2168                 ap->state = ANEG_STATE_ACK_DETECT;
2169
2170                 /* fallthru */
2171         case ANEG_STATE_ACK_DETECT:
2172                 if (ap->ack_match != 0) {
2173                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2174                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2175                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2176                         } else {
2177                                 ap->state = ANEG_STATE_AN_ENABLE;
2178                         }
2179                 } else if (ap->ability_match != 0 &&
2180                            ap->rxconfig == 0) {
2181                         ap->state = ANEG_STATE_AN_ENABLE;
2182                 }
2183                 break;
2184
2185         case ANEG_STATE_COMPLETE_ACK_INIT:
2186                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2187                         ret = ANEG_FAILED;
2188                         break;
2189                 }
2190                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2191                                MR_LP_ADV_HALF_DUPLEX |
2192                                MR_LP_ADV_SYM_PAUSE |
2193                                MR_LP_ADV_ASYM_PAUSE |
2194                                MR_LP_ADV_REMOTE_FAULT1 |
2195                                MR_LP_ADV_REMOTE_FAULT2 |
2196                                MR_LP_ADV_NEXT_PAGE |
2197                                MR_TOGGLE_RX |
2198                                MR_NP_RX);
2199                 if (ap->rxconfig & ANEG_CFG_FD)
2200                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2201                 if (ap->rxconfig & ANEG_CFG_HD)
2202                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2203                 if (ap->rxconfig & ANEG_CFG_PS1)
2204                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2205                 if (ap->rxconfig & ANEG_CFG_PS2)
2206                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2207                 if (ap->rxconfig & ANEG_CFG_RF1)
2208                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2209                 if (ap->rxconfig & ANEG_CFG_RF2)
2210                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2211                 if (ap->rxconfig & ANEG_CFG_NP)
2212                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2213
2214                 ap->link_time = ap->cur_time;
2215
2216                 ap->flags ^= (MR_TOGGLE_TX);
2217                 if (ap->rxconfig & 0x0008)
2218                         ap->flags |= MR_TOGGLE_RX;
2219                 if (ap->rxconfig & ANEG_CFG_NP)
2220                         ap->flags |= MR_NP_RX;
2221                 ap->flags |= MR_PAGE_RX;
2222
2223                 ap->state = ANEG_STATE_COMPLETE_ACK;
2224                 ret = ANEG_TIMER_ENAB;
2225                 break;
2226
2227         case ANEG_STATE_COMPLETE_ACK:
2228                 if (ap->ability_match != 0 &&
2229                     ap->rxconfig == 0) {
2230                         ap->state = ANEG_STATE_AN_ENABLE;
2231                         break;
2232                 }
2233                 delta = ap->cur_time - ap->link_time;
2234                 if (delta > ANEG_STATE_SETTLE_TIME) {
2235                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2236                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2237                         } else {
2238                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2239                                     !(ap->flags & MR_NP_RX)) {
2240                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2241                                 } else {
2242                                         ret = ANEG_FAILED;
2243                                 }
2244                         }
2245                 }
2246                 break;
2247
2248         case ANEG_STATE_IDLE_DETECT_INIT:
2249                 ap->link_time = ap->cur_time;
2250                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2251                 tw32_f(MAC_MODE, tp->mac_mode);
2252                 udelay(40);
2253
2254                 ap->state = ANEG_STATE_IDLE_DETECT;
2255                 ret = ANEG_TIMER_ENAB;
2256                 break;
2257
2258         case ANEG_STATE_IDLE_DETECT:
2259                 if (ap->ability_match != 0 &&
2260                     ap->rxconfig == 0) {
2261                         ap->state = ANEG_STATE_AN_ENABLE;
2262                         break;
2263                 }
2264                 delta = ap->cur_time - ap->link_time;
2265                 if (delta > ANEG_STATE_SETTLE_TIME) {
2266                         /* XXX another gem from the Broadcom driver :( */
2267                         ap->state = ANEG_STATE_LINK_OK;
2268                 }
2269                 break;
2270
2271         case ANEG_STATE_LINK_OK:
2272                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2273                 ret = ANEG_DONE;
2274                 break;
2275
2276         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2277                 /* ??? unimplemented */
2278                 break;
2279
2280         case ANEG_STATE_NEXT_PAGE_WAIT:
2281                 /* ??? unimplemented */
2282                 break;
2283
2284         default:
2285                 ret = ANEG_FAILED;
2286                 break;
2287         };
2288
2289         return ret;
2290 }
2291
2292 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2293 {
2294         int res = 0;
2295         struct tg3_fiber_aneginfo aninfo;
2296         int status = ANEG_FAILED;
2297         unsigned int tick;
2298         u32 tmp;
2299
2300         tw32_f(MAC_TX_AUTO_NEG, 0);
2301
2302         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2303         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2304         udelay(40);
2305
2306         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2307         udelay(40);
2308
2309         memset(&aninfo, 0, sizeof(aninfo));
2310         aninfo.flags |= MR_AN_ENABLE;
2311         aninfo.state = ANEG_STATE_UNKNOWN;
2312         aninfo.cur_time = 0;
2313         tick = 0;
2314         while (++tick < 195000) {
2315                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2316                 if (status == ANEG_DONE || status == ANEG_FAILED)
2317                         break;
2318
2319                 udelay(1);
2320         }
2321
2322         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2323         tw32_f(MAC_MODE, tp->mac_mode);
2324         udelay(40);
2325
2326         *flags = aninfo.flags;
2327
2328         if (status == ANEG_DONE &&
2329             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2330                              MR_LP_ADV_FULL_DUPLEX)))
2331                 res = 1;
2332
2333         return res;
2334 }
2335
2336 static void tg3_init_bcm8002(struct tg3 *tp)
2337 {
2338         u32 mac_status = tr32(MAC_STATUS);
2339         int i;
2340
2341         /* Reset when initting first time or we have a link. */
2342         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2343             !(mac_status & MAC_STATUS_PCS_SYNCED))
2344                 return;
2345
2346         /* Set PLL lock range. */
2347         tg3_writephy(tp, 0x16, 0x8007);
2348
2349         /* SW reset */
2350         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2351
2352         /* Wait for reset to complete. */
2353         /* XXX schedule_timeout() ... */
2354         for (i = 0; i < 500; i++)
2355                 udelay(10);
2356
2357         /* Config mode; select PMA/Ch 1 regs. */
2358         tg3_writephy(tp, 0x10, 0x8411);
2359
2360         /* Enable auto-lock and comdet, select txclk for tx. */
2361         tg3_writephy(tp, 0x11, 0x0a10);
2362
2363         tg3_writephy(tp, 0x18, 0x00a0);
2364         tg3_writephy(tp, 0x16, 0x41ff);
2365
2366         /* Assert and deassert POR. */
2367         tg3_writephy(tp, 0x13, 0x0400);
2368         udelay(40);
2369         tg3_writephy(tp, 0x13, 0x0000);
2370
2371         tg3_writephy(tp, 0x11, 0x0a50);
2372         udelay(40);
2373         tg3_writephy(tp, 0x11, 0x0a10);
2374
2375         /* Wait for signal to stabilize */
2376         /* XXX schedule_timeout() ... */
2377         for (i = 0; i < 15000; i++)
2378                 udelay(10);
2379
2380         /* Deselect the channel register so we can read the PHYID
2381          * later.
2382          */
2383         tg3_writephy(tp, 0x10, 0x8011);
2384 }
2385
2386 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2387 {
2388         u32 sg_dig_ctrl, sg_dig_status;
2389         u32 serdes_cfg, expected_sg_dig_ctrl;
2390         int workaround, port_a;
2391         int current_link_up;
2392
2393         serdes_cfg = 0;
2394         expected_sg_dig_ctrl = 0;
2395         workaround = 0;
2396         port_a = 1;
2397         current_link_up = 0;
2398
2399         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2400             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2401                 workaround = 1;
2402                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2403                         port_a = 0;
2404
2405                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2406                 /* preserve bits 20-23 for voltage regulator */
2407                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2408         }
2409
2410         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2411
2412         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2413                 if (sg_dig_ctrl & (1 << 31)) {
2414                         if (workaround) {
2415                                 u32 val = serdes_cfg;
2416
2417                                 if (port_a)
2418                                         val |= 0xc010000;
2419                                 else
2420                                         val |= 0x4010000;
2421                                 tw32_f(MAC_SERDES_CFG, val);
2422                         }
2423                         tw32_f(SG_DIG_CTRL, 0x01388400);
2424                 }
2425                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2426                         tg3_setup_flow_control(tp, 0, 0);
2427                         current_link_up = 1;
2428                 }
2429                 goto out;
2430         }
2431
2432         /* Want auto-negotiation.  */
2433         expected_sg_dig_ctrl = 0x81388400;
2434
2435         /* Pause capability */
2436         expected_sg_dig_ctrl |= (1 << 11);
2437
2438         /* Asymettric pause */
2439         expected_sg_dig_ctrl |= (1 << 12);
2440
2441         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2442                 if (workaround)
2443                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2444                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2445                 udelay(5);
2446                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2447
2448                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2449         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2450                                  MAC_STATUS_SIGNAL_DET)) {
2451                 int i;
2452
2453                 /* Giver time to negotiate (~200ms) */
2454                 for (i = 0; i < 40000; i++) {
2455                         sg_dig_status = tr32(SG_DIG_STATUS);
2456                         if (sg_dig_status & (0x3))
2457                                 break;
2458                         udelay(5);
2459                 }
2460                 mac_status = tr32(MAC_STATUS);
2461
2462                 if ((sg_dig_status & (1 << 1)) &&
2463                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2464                         u32 local_adv, remote_adv;
2465
2466                         local_adv = ADVERTISE_PAUSE_CAP;
2467                         remote_adv = 0;
2468                         if (sg_dig_status & (1 << 19))
2469                                 remote_adv |= LPA_PAUSE_CAP;
2470                         if (sg_dig_status & (1 << 20))
2471                                 remote_adv |= LPA_PAUSE_ASYM;
2472
2473                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2474                         current_link_up = 1;
2475                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2476                 } else if (!(sg_dig_status & (1 << 1))) {
2477                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2478                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2479                         else {
2480                                 if (workaround) {
2481                                         u32 val = serdes_cfg;
2482
2483                                         if (port_a)
2484                                                 val |= 0xc010000;
2485                                         else
2486                                                 val |= 0x4010000;
2487
2488                                         tw32_f(MAC_SERDES_CFG, val);
2489                                 }
2490
2491                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2492                                 udelay(40);
2493
2494                                 /* Link parallel detection - link is up */
2495                                 /* only if we have PCS_SYNC and not */
2496                                 /* receiving config code words */
2497                                 mac_status = tr32(MAC_STATUS);
2498                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2499                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2500                                         tg3_setup_flow_control(tp, 0, 0);
2501                                         current_link_up = 1;
2502                                 }
2503                         }
2504                 }
2505         }
2506
2507 out:
2508         return current_link_up;
2509 }
2510
2511 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2512 {
2513         int current_link_up = 0;
2514
2515         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2516                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2517                 goto out;
2518         }
2519
2520         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2521                 u32 flags;
2522                 int i;
2523   
2524                 if (fiber_autoneg(tp, &flags)) {
2525                         u32 local_adv, remote_adv;
2526
2527                         local_adv = ADVERTISE_PAUSE_CAP;
2528                         remote_adv = 0;
2529                         if (flags & MR_LP_ADV_SYM_PAUSE)
2530                                 remote_adv |= LPA_PAUSE_CAP;
2531                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2532                                 remote_adv |= LPA_PAUSE_ASYM;
2533
2534                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2535
2536                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2537                         current_link_up = 1;
2538                 }
2539                 for (i = 0; i < 30; i++) {
2540                         udelay(20);
2541                         tw32_f(MAC_STATUS,
2542                                (MAC_STATUS_SYNC_CHANGED |
2543                                 MAC_STATUS_CFG_CHANGED));
2544                         udelay(40);
2545                         if ((tr32(MAC_STATUS) &
2546                              (MAC_STATUS_SYNC_CHANGED |
2547                               MAC_STATUS_CFG_CHANGED)) == 0)
2548                                 break;
2549                 }
2550
2551                 mac_status = tr32(MAC_STATUS);
2552                 if (current_link_up == 0 &&
2553                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2554                     !(mac_status & MAC_STATUS_RCVD_CFG))
2555                         current_link_up = 1;
2556         } else {
2557                 /* Forcing 1000FD link up. */
2558                 current_link_up = 1;
2559                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2560
2561                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2562                 udelay(40);
2563         }
2564
2565 out:
2566         return current_link_up;
2567 }
2568
2569 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2570 {
2571         u32 orig_pause_cfg;
2572         u16 orig_active_speed;
2573         u8 orig_active_duplex;
2574         u32 mac_status;
2575         int current_link_up;
2576         int i;
2577
2578         orig_pause_cfg =
2579                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2580                                   TG3_FLAG_TX_PAUSE));
2581         orig_active_speed = tp->link_config.active_speed;
2582         orig_active_duplex = tp->link_config.active_duplex;
2583
2584         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2585             netif_carrier_ok(tp->dev) &&
2586             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2587                 mac_status = tr32(MAC_STATUS);
2588                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2589                                MAC_STATUS_SIGNAL_DET |
2590                                MAC_STATUS_CFG_CHANGED |
2591                                MAC_STATUS_RCVD_CFG);
2592                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2593                                    MAC_STATUS_SIGNAL_DET)) {
2594                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2595                                             MAC_STATUS_CFG_CHANGED));
2596                         return 0;
2597                 }
2598         }
2599
2600         tw32_f(MAC_TX_AUTO_NEG, 0);
2601
2602         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2603         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2604         tw32_f(MAC_MODE, tp->mac_mode);
2605         udelay(40);
2606
2607         if (tp->phy_id == PHY_ID_BCM8002)
2608                 tg3_init_bcm8002(tp);
2609
2610         /* Enable link change event even when serdes polling.  */
2611         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2612         udelay(40);
2613
2614         current_link_up = 0;
2615         mac_status = tr32(MAC_STATUS);
2616
2617         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2618                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2619         else
2620                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2621
2622         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2623         tw32_f(MAC_MODE, tp->mac_mode);
2624         udelay(40);
2625
2626         tp->hw_status->status =
2627                 (SD_STATUS_UPDATED |
2628                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2629
2630         for (i = 0; i < 100; i++) {
2631                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2632                                     MAC_STATUS_CFG_CHANGED));
2633                 udelay(5);
2634                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2635                                          MAC_STATUS_CFG_CHANGED)) == 0)
2636                         break;
2637         }
2638
2639         mac_status = tr32(MAC_STATUS);
2640         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2641                 current_link_up = 0;
2642                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2643                         tw32_f(MAC_MODE, (tp->mac_mode |
2644                                           MAC_MODE_SEND_CONFIGS));
2645                         udelay(1);
2646                         tw32_f(MAC_MODE, tp->mac_mode);
2647                 }
2648         }
2649
2650         if (current_link_up == 1) {
2651                 tp->link_config.active_speed = SPEED_1000;
2652                 tp->link_config.active_duplex = DUPLEX_FULL;
2653                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2654                                     LED_CTRL_LNKLED_OVERRIDE |
2655                                     LED_CTRL_1000MBPS_ON));
2656         } else {
2657                 tp->link_config.active_speed = SPEED_INVALID;
2658                 tp->link_config.active_duplex = DUPLEX_INVALID;
2659                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2660                                     LED_CTRL_LNKLED_OVERRIDE |
2661                                     LED_CTRL_TRAFFIC_OVERRIDE));
2662         }
2663
2664         if (current_link_up != netif_carrier_ok(tp->dev)) {
2665                 if (current_link_up)
2666                         netif_carrier_on(tp->dev);
2667                 else
2668                         netif_carrier_off(tp->dev);
2669                 tg3_link_report(tp);
2670         } else {
2671                 u32 now_pause_cfg =
2672                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2673                                          TG3_FLAG_TX_PAUSE);
2674                 if (orig_pause_cfg != now_pause_cfg ||
2675                     orig_active_speed != tp->link_config.active_speed ||
2676                     orig_active_duplex != tp->link_config.active_duplex)
2677                         tg3_link_report(tp);
2678         }
2679
2680         return 0;
2681 }
2682
2683 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2684 {
2685         int current_link_up, err = 0;
2686         u32 bmsr, bmcr;
2687         u16 current_speed;
2688         u8 current_duplex;
2689
2690         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2691         tw32_f(MAC_MODE, tp->mac_mode);
2692         udelay(40);
2693
2694         tw32(MAC_EVENT, 0);
2695
2696         tw32_f(MAC_STATUS,
2697              (MAC_STATUS_SYNC_CHANGED |
2698               MAC_STATUS_CFG_CHANGED |
2699               MAC_STATUS_MI_COMPLETION |
2700               MAC_STATUS_LNKSTATE_CHANGED));
2701         udelay(40);
2702
2703         if (force_reset)
2704                 tg3_phy_reset(tp);
2705
2706         current_link_up = 0;
2707         current_speed = SPEED_INVALID;
2708         current_duplex = DUPLEX_INVALID;
2709
2710         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2711         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2712         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2713                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2714                         bmsr |= BMSR_LSTATUS;
2715                 else
2716                         bmsr &= ~BMSR_LSTATUS;
2717         }
2718
2719         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2720
2721         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2722             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2723                 /* do nothing, just check for link up at the end */
2724         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2725                 u32 adv, new_adv;
2726
2727                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2728                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2729                                   ADVERTISE_1000XPAUSE |
2730                                   ADVERTISE_1000XPSE_ASYM |
2731                                   ADVERTISE_SLCT);
2732
2733                 /* Always advertise symmetric PAUSE just like copper */
2734                 new_adv |= ADVERTISE_1000XPAUSE;
2735
2736                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2737                         new_adv |= ADVERTISE_1000XHALF;
2738                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2739                         new_adv |= ADVERTISE_1000XFULL;
2740
2741                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2742                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2743                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2744                         tg3_writephy(tp, MII_BMCR, bmcr);
2745
2746                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2747                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2748                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2749
2750                         return err;
2751                 }
2752         } else {
2753                 u32 new_bmcr;
2754
2755                 bmcr &= ~BMCR_SPEED1000;
2756                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2757
2758                 if (tp->link_config.duplex == DUPLEX_FULL)
2759                         new_bmcr |= BMCR_FULLDPLX;
2760
2761                 if (new_bmcr != bmcr) {
2762                         /* BMCR_SPEED1000 is a reserved bit that needs
2763                          * to be set on write.
2764                          */
2765                         new_bmcr |= BMCR_SPEED1000;
2766
2767                         /* Force a linkdown */
2768                         if (netif_carrier_ok(tp->dev)) {
2769                                 u32 adv;
2770
2771                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2772                                 adv &= ~(ADVERTISE_1000XFULL |
2773                                          ADVERTISE_1000XHALF |
2774                                          ADVERTISE_SLCT);
2775                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2776                                 tg3_writephy(tp, MII_BMCR, bmcr |
2777                                                            BMCR_ANRESTART |
2778                                                            BMCR_ANENABLE);
2779                                 udelay(10);
2780                                 netif_carrier_off(tp->dev);
2781                         }
2782                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2783                         bmcr = new_bmcr;
2784                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2785                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2786                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2787                             ASIC_REV_5714) {
2788                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2789                                         bmsr |= BMSR_LSTATUS;
2790                                 else
2791                                         bmsr &= ~BMSR_LSTATUS;
2792                         }
2793                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2794                 }
2795         }
2796
2797         if (bmsr & BMSR_LSTATUS) {
2798                 current_speed = SPEED_1000;
2799                 current_link_up = 1;
2800                 if (bmcr & BMCR_FULLDPLX)
2801                         current_duplex = DUPLEX_FULL;
2802                 else
2803                         current_duplex = DUPLEX_HALF;
2804
2805                 if (bmcr & BMCR_ANENABLE) {
2806                         u32 local_adv, remote_adv, common;
2807
2808                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2809                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2810                         common = local_adv & remote_adv;
2811                         if (common & (ADVERTISE_1000XHALF |
2812                                       ADVERTISE_1000XFULL)) {
2813                                 if (common & ADVERTISE_1000XFULL)
2814                                         current_duplex = DUPLEX_FULL;
2815                                 else
2816                                         current_duplex = DUPLEX_HALF;
2817
2818                                 tg3_setup_flow_control(tp, local_adv,
2819                                                        remote_adv);
2820                         }
2821                         else
2822                                 current_link_up = 0;
2823                 }
2824         }
2825
2826         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2827         if (tp->link_config.active_duplex == DUPLEX_HALF)
2828                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2829
2830         tw32_f(MAC_MODE, tp->mac_mode);
2831         udelay(40);
2832
2833         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2834
2835         tp->link_config.active_speed = current_speed;
2836         tp->link_config.active_duplex = current_duplex;
2837
2838         if (current_link_up != netif_carrier_ok(tp->dev)) {
2839                 if (current_link_up)
2840                         netif_carrier_on(tp->dev);
2841                 else {
2842                         netif_carrier_off(tp->dev);
2843                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2844                 }
2845                 tg3_link_report(tp);
2846         }
2847         return err;
2848 }
2849
2850 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2851 {
2852         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2853                 /* Give autoneg time to complete. */
2854                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2855                 return;
2856         }
2857         if (!netif_carrier_ok(tp->dev) &&
2858             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2859                 u32 bmcr;
2860
2861                 tg3_readphy(tp, MII_BMCR, &bmcr);
2862                 if (bmcr & BMCR_ANENABLE) {
2863                         u32 phy1, phy2;
2864
2865                         /* Select shadow register 0x1f */
2866                         tg3_writephy(tp, 0x1c, 0x7c00);
2867                         tg3_readphy(tp, 0x1c, &phy1);
2868
2869                         /* Select expansion interrupt status register */
2870                         tg3_writephy(tp, 0x17, 0x0f01);
2871                         tg3_readphy(tp, 0x15, &phy2);
2872                         tg3_readphy(tp, 0x15, &phy2);
2873
2874                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2875                                 /* We have signal detect and not receiving
2876                                  * config code words, link is up by parallel
2877                                  * detection.
2878                                  */
2879
2880                                 bmcr &= ~BMCR_ANENABLE;
2881                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2882                                 tg3_writephy(tp, MII_BMCR, bmcr);
2883                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2884                         }
2885                 }
2886         }
2887         else if (netif_carrier_ok(tp->dev) &&
2888                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2889                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2890                 u32 phy2;
2891
2892                 /* Select expansion interrupt status register */
2893                 tg3_writephy(tp, 0x17, 0x0f01);
2894                 tg3_readphy(tp, 0x15, &phy2);
2895                 if (phy2 & 0x20) {
2896                         u32 bmcr;
2897
2898                         /* Config code words received, turn on autoneg. */
2899                         tg3_readphy(tp, MII_BMCR, &bmcr);
2900                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2901
2902                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2903
2904                 }
2905         }
2906 }
2907
2908 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2909 {
2910         int err;
2911
2912         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2913                 err = tg3_setup_fiber_phy(tp, force_reset);
2914         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2915                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2916         } else {
2917                 err = tg3_setup_copper_phy(tp, force_reset);
2918         }
2919
2920         if (tp->link_config.active_speed == SPEED_1000 &&
2921             tp->link_config.active_duplex == DUPLEX_HALF)
2922                 tw32(MAC_TX_LENGTHS,
2923                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2924                       (6 << TX_LENGTHS_IPG_SHIFT) |
2925                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2926         else
2927                 tw32(MAC_TX_LENGTHS,
2928                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2929                       (6 << TX_LENGTHS_IPG_SHIFT) |
2930                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2931
2932         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2933                 if (netif_carrier_ok(tp->dev)) {
2934                         tw32(HOSTCC_STAT_COAL_TICKS,
2935                              tp->coal.stats_block_coalesce_usecs);
2936                 } else {
2937                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2938                 }
2939         }
2940
2941         return err;
2942 }
2943
2944 /* Tigon3 never reports partial packet sends.  So we do not
2945  * need special logic to handle SKBs that have not had all
2946  * of their frags sent yet, like SunGEM does.
2947  */
2948 static void tg3_tx(struct tg3 *tp)
2949 {
2950         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2951         u32 sw_idx = tp->tx_cons;
2952
2953         while (sw_idx != hw_idx) {
2954                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2955                 struct sk_buff *skb = ri->skb;
2956                 int i;
2957
2958                 if (unlikely(skb == NULL))
2959                         BUG();
2960
2961                 pci_unmap_single(tp->pdev,
2962                                  pci_unmap_addr(ri, mapping),
2963                                  skb_headlen(skb),
2964                                  PCI_DMA_TODEVICE);
2965
2966                 ri->skb = NULL;
2967
2968                 sw_idx = NEXT_TX(sw_idx);
2969
2970                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2971                         if (unlikely(sw_idx == hw_idx))
2972                                 BUG();
2973
2974                         ri = &tp->tx_buffers[sw_idx];
2975                         if (unlikely(ri->skb != NULL))
2976                                 BUG();
2977
2978                         pci_unmap_page(tp->pdev,
2979                                        pci_unmap_addr(ri, mapping),
2980                                        skb_shinfo(skb)->frags[i].size,
2981                                        PCI_DMA_TODEVICE);
2982
2983                         sw_idx = NEXT_TX(sw_idx);
2984                 }
2985
2986                 dev_kfree_skb(skb);
2987         }
2988
2989         tp->tx_cons = sw_idx;
2990
2991         if (unlikely(netif_queue_stopped(tp->dev))) {
2992                 spin_lock(&tp->tx_lock);
2993                 if (netif_queue_stopped(tp->dev) &&
2994                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2995                         netif_wake_queue(tp->dev);
2996                 spin_unlock(&tp->tx_lock);
2997         }
2998 }
2999
3000 /* Returns size of skb allocated or < 0 on error.
3001  *
3002  * We only need to fill in the address because the other members
3003  * of the RX descriptor are invariant, see tg3_init_rings.
3004  *
3005  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3006  * posting buffers we only dirty the first cache line of the RX
3007  * descriptor (containing the address).  Whereas for the RX status
3008  * buffers the cpu only reads the last cacheline of the RX descriptor
3009  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3010  */
3011 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3012                             int src_idx, u32 dest_idx_unmasked)
3013 {
3014         struct tg3_rx_buffer_desc *desc;
3015         struct ring_info *map, *src_map;
3016         struct sk_buff *skb;
3017         dma_addr_t mapping;
3018         int skb_size, dest_idx;
3019
3020         src_map = NULL;
3021         switch (opaque_key) {
3022         case RXD_OPAQUE_RING_STD:
3023                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3024                 desc = &tp->rx_std[dest_idx];
3025                 map = &tp->rx_std_buffers[dest_idx];
3026                 if (src_idx >= 0)
3027                         src_map = &tp->rx_std_buffers[src_idx];
3028                 skb_size = tp->rx_pkt_buf_sz;
3029                 break;
3030
3031         case RXD_OPAQUE_RING_JUMBO:
3032                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3033                 desc = &tp->rx_jumbo[dest_idx];
3034                 map = &tp->rx_jumbo_buffers[dest_idx];
3035                 if (src_idx >= 0)
3036                         src_map = &tp->rx_jumbo_buffers[src_idx];
3037                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3038                 break;
3039
3040         default:
3041                 return -EINVAL;
3042         };
3043
3044         /* Do not overwrite any of the map or rp information
3045          * until we are sure we can commit to a new buffer.
3046          *
3047          * Callers depend upon this behavior and assume that
3048          * we leave everything unchanged if we fail.
3049          */
3050         skb = dev_alloc_skb(skb_size);
3051         if (skb == NULL)
3052                 return -ENOMEM;
3053
3054         skb->dev = tp->dev;
3055         skb_reserve(skb, tp->rx_offset);
3056
3057         mapping = pci_map_single(tp->pdev, skb->data,
3058                                  skb_size - tp->rx_offset,
3059                                  PCI_DMA_FROMDEVICE);
3060
3061         map->skb = skb;
3062         pci_unmap_addr_set(map, mapping, mapping);
3063
3064         if (src_map != NULL)
3065                 src_map->skb = NULL;
3066
3067         desc->addr_hi = ((u64)mapping >> 32);
3068         desc->addr_lo = ((u64)mapping & 0xffffffff);
3069
3070         return skb_size;
3071 }
3072
3073 /* We only need to move over in the address because the other
3074  * members of the RX descriptor are invariant.  See notes above
3075  * tg3_alloc_rx_skb for full details.
3076  */
3077 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3078                            int src_idx, u32 dest_idx_unmasked)
3079 {
3080         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3081         struct ring_info *src_map, *dest_map;
3082         int dest_idx;
3083
3084         switch (opaque_key) {
3085         case RXD_OPAQUE_RING_STD:
3086                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3087                 dest_desc = &tp->rx_std[dest_idx];
3088                 dest_map = &tp->rx_std_buffers[dest_idx];
3089                 src_desc = &tp->rx_std[src_idx];
3090                 src_map = &tp->rx_std_buffers[src_idx];
3091                 break;
3092
3093         case RXD_OPAQUE_RING_JUMBO:
3094                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3095                 dest_desc = &tp->rx_jumbo[dest_idx];
3096                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3097                 src_desc = &tp->rx_jumbo[src_idx];
3098                 src_map = &tp->rx_jumbo_buffers[src_idx];
3099                 break;
3100
3101         default:
3102                 return;
3103         };
3104
3105         dest_map->skb = src_map->skb;
3106         pci_unmap_addr_set(dest_map, mapping,
3107                            pci_unmap_addr(src_map, mapping));
3108         dest_desc->addr_hi = src_desc->addr_hi;
3109         dest_desc->addr_lo = src_desc->addr_lo;
3110
3111         src_map->skb = NULL;
3112 }
3113
3114 #if TG3_VLAN_TAG_USED
3115 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3116 {
3117         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3118 }
3119 #endif
3120
3121 /* The RX ring scheme is composed of multiple rings which post fresh
3122  * buffers to the chip, and one special ring the chip uses to report
3123  * status back to the host.
3124  *
3125  * The special ring reports the status of received packets to the
3126  * host.  The chip does not write into the original descriptor the
3127  * RX buffer was obtained from.  The chip simply takes the original
3128  * descriptor as provided by the host, updates the status and length
3129  * field, then writes this into the next status ring entry.
3130  *
3131  * Each ring the host uses to post buffers to the chip is described
3132  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3133  * it is first placed into the on-chip ram.  When the packet's length
3134  * is known, it walks down the TG3_BDINFO entries to select the ring.
3135  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3136  * which is within the range of the new packet's length is chosen.
3137  *
3138  * The "separate ring for rx status" scheme may sound queer, but it makes
3139  * sense from a cache coherency perspective.  If only the host writes
3140  * to the buffer post rings, and only the chip writes to the rx status
3141  * rings, then cache lines never move beyond shared-modified state.
3142  * If both the host and chip were to write into the same ring, cache line
3143  * eviction could occur since both entities want it in an exclusive state.
3144  */
3145 static int tg3_rx(struct tg3 *tp, int budget)
3146 {
3147         u32 work_mask;
3148         u32 sw_idx = tp->rx_rcb_ptr;
3149         u16 hw_idx;
3150         int received;
3151
3152         hw_idx = tp->hw_status->idx[0].rx_producer;
3153         /*
3154          * We need to order the read of hw_idx and the read of
3155          * the opaque cookie.
3156          */
3157         rmb();
3158         work_mask = 0;
3159         received = 0;
3160         while (sw_idx != hw_idx && budget > 0) {
3161                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3162                 unsigned int len;
3163                 struct sk_buff *skb;
3164                 dma_addr_t dma_addr;
3165                 u32 opaque_key, desc_idx, *post_ptr;
3166
3167                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3168                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3169                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3170                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3171                                                   mapping);
3172                         skb = tp->rx_std_buffers[desc_idx].skb;
3173                         post_ptr = &tp->rx_std_ptr;
3174                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3175                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3176                                                   mapping);
3177                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3178                         post_ptr = &tp->rx_jumbo_ptr;
3179                 }
3180                 else {
3181                         goto next_pkt_nopost;
3182                 }
3183
3184                 work_mask |= opaque_key;
3185
3186                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3187                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3188                 drop_it:
3189                         tg3_recycle_rx(tp, opaque_key,
3190                                        desc_idx, *post_ptr);
3191                 drop_it_no_recycle:
3192                         /* Other statistics kept track of by card. */
3193                         tp->net_stats.rx_dropped++;
3194                         goto next_pkt;
3195                 }
3196
3197                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3198
3199                 if (len > RX_COPY_THRESHOLD 
3200                         && tp->rx_offset == 2
3201                         /* rx_offset != 2 iff this is a 5701 card running
3202                          * in PCI-X mode [see tg3_get_invariants()] */
3203                 ) {
3204                         int skb_size;
3205
3206                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3207                                                     desc_idx, *post_ptr);
3208                         if (skb_size < 0)
3209                                 goto drop_it;
3210
3211                         pci_unmap_single(tp->pdev, dma_addr,
3212                                          skb_size - tp->rx_offset,
3213                                          PCI_DMA_FROMDEVICE);
3214
3215                         skb_put(skb, len);
3216                 } else {
3217                         struct sk_buff *copy_skb;
3218
3219                         tg3_recycle_rx(tp, opaque_key,
3220                                        desc_idx, *post_ptr);
3221
3222                         copy_skb = dev_alloc_skb(len + 2);
3223                         if (copy_skb == NULL)
3224                                 goto drop_it_no_recycle;
3225
3226                         copy_skb->dev = tp->dev;
3227                         skb_reserve(copy_skb, 2);
3228                         skb_put(copy_skb, len);
3229                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3230                         memcpy(copy_skb->data, skb->data, len);
3231                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3232
3233                         /* We'll reuse the original ring buffer. */
3234                         skb = copy_skb;
3235                 }
3236
3237                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3238                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3239                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3240                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3241                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3242                 else
3243                         skb->ip_summed = CHECKSUM_NONE;
3244
3245                 skb->protocol = eth_type_trans(skb, tp->dev);
3246 #if TG3_VLAN_TAG_USED
3247                 if (tp->vlgrp != NULL &&
3248                     desc->type_flags & RXD_FLAG_VLAN) {
3249                         tg3_vlan_rx(tp, skb,
3250                                     desc->err_vlan & RXD_VLAN_MASK);
3251                 } else
3252 #endif
3253                         netif_receive_skb(skb);
3254
3255                 tp->dev->last_rx = jiffies;
3256                 received++;
3257                 budget--;
3258
3259 next_pkt:
3260                 (*post_ptr)++;
3261 next_pkt_nopost:
3262                 sw_idx++;
3263                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3264
3265                 /* Refresh hw_idx to see if there is new work */
3266                 if (sw_idx == hw_idx) {
3267                         hw_idx = tp->hw_status->idx[0].rx_producer;
3268                         rmb();
3269                 }
3270         }
3271
3272         /* ACK the status ring. */
3273         tp->rx_rcb_ptr = sw_idx;
3274         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3275
3276         /* Refill RX ring(s). */
3277         if (work_mask & RXD_OPAQUE_RING_STD) {
3278                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3279                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3280                              sw_idx);
3281         }
3282         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3283                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3284                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3285                              sw_idx);
3286         }
3287         mmiowb();
3288
3289         return received;
3290 }
3291
3292 static int tg3_poll(struct net_device *netdev, int *budget)
3293 {
3294         struct tg3 *tp = netdev_priv(netdev);
3295         struct tg3_hw_status *sblk = tp->hw_status;
3296         int done;
3297
3298         /* handle link change and other phy events */
3299         if (!(tp->tg3_flags &
3300               (TG3_FLAG_USE_LINKCHG_REG |
3301                TG3_FLAG_POLL_SERDES))) {
3302                 if (sblk->status & SD_STATUS_LINK_CHG) {
3303                         sblk->status = SD_STATUS_UPDATED |
3304                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3305                         spin_lock(&tp->lock);
3306                         tg3_setup_phy(tp, 0);
3307                         spin_unlock(&tp->lock);
3308                 }
3309         }
3310
3311         /* run TX completion thread */
3312         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3313                 tg3_tx(tp);
3314         }
3315
3316         /* run RX thread, within the bounds set by NAPI.
3317          * All RX "locking" is done by ensuring outside
3318          * code synchronizes with dev->poll()
3319          */
3320         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3321                 int orig_budget = *budget;
3322                 int work_done;
3323
3324                 if (orig_budget > netdev->quota)
3325                         orig_budget = netdev->quota;
3326
3327                 work_done = tg3_rx(tp, orig_budget);
3328
3329                 *budget -= work_done;
3330                 netdev->quota -= work_done;
3331         }
3332
3333         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3334                 tp->last_tag = sblk->status_tag;
3335                 rmb();
3336         } else
3337                 sblk->status &= ~SD_STATUS_UPDATED;
3338
3339         /* if no more work, tell net stack and NIC we're done */
3340         done = !tg3_has_work(tp);
3341         if (done) {
3342                 netif_rx_complete(netdev);
3343                 tg3_restart_ints(tp);
3344         }
3345
3346         return (done ? 0 : 1);
3347 }
3348
3349 static void tg3_irq_quiesce(struct tg3 *tp)
3350 {
3351         BUG_ON(tp->irq_sync);
3352
3353         tp->irq_sync = 1;
3354         smp_mb();
3355
3356         synchronize_irq(tp->pdev->irq);
3357 }
3358
3359 static inline int tg3_irq_sync(struct tg3 *tp)
3360 {
3361         return tp->irq_sync;
3362 }
3363
3364 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3365  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3366  * with as well.  Most of the time, this is not necessary except when
3367  * shutting down the device.
3368  */
3369 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3370 {
3371         if (irq_sync)
3372                 tg3_irq_quiesce(tp);
3373         spin_lock_bh(&tp->lock);
3374         spin_lock(&tp->tx_lock);
3375 }
3376
3377 static inline void tg3_full_unlock(struct tg3 *tp)
3378 {
3379         spin_unlock(&tp->tx_lock);
3380         spin_unlock_bh(&tp->lock);
3381 }
3382
3383 /* One-shot MSI handler - Chip automatically disables interrupt
3384  * after sending MSI so driver doesn't have to do it.
3385  */
3386 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3387 {
3388         struct net_device *dev = dev_id;
3389         struct tg3 *tp = netdev_priv(dev);
3390
3391         prefetch(tp->hw_status);
3392         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3393
3394         if (likely(!tg3_irq_sync(tp)))
3395                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3396
3397         return IRQ_HANDLED;
3398 }
3399
3400 /* MSI ISR - No need to check for interrupt sharing and no need to
3401  * flush status block and interrupt mailbox. PCI ordering rules
3402  * guarantee that MSI will arrive after the status block.
3403  */
3404 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3405 {
3406         struct net_device *dev = dev_id;
3407         struct tg3 *tp = netdev_priv(dev);
3408
3409         prefetch(tp->hw_status);
3410         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3411         /*
3412          * Writing any value to intr-mbox-0 clears PCI INTA# and
3413          * chip-internal interrupt pending events.
3414          * Writing non-zero to intr-mbox-0 additional tells the
3415          * NIC to stop sending us irqs, engaging "in-intr-handler"
3416          * event coalescing.
3417          */
3418         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3419         if (likely(!tg3_irq_sync(tp)))
3420                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3421
3422         return IRQ_RETVAL(1);
3423 }
3424
3425 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3426 {
3427         struct net_device *dev = dev_id;
3428         struct tg3 *tp = netdev_priv(dev);
3429         struct tg3_hw_status *sblk = tp->hw_status;
3430         unsigned int handled = 1;
3431
3432         /* In INTx mode, it is possible for the interrupt to arrive at
3433          * the CPU before the status block posted prior to the interrupt.
3434          * Reading the PCI State register will confirm whether the
3435          * interrupt is ours and will flush the status block.
3436          */
3437         if ((sblk->status & SD_STATUS_UPDATED) ||
3438             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3439                 /*
3440                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3441                  * chip-internal interrupt pending events.
3442                  * Writing non-zero to intr-mbox-0 additional tells the
3443                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3444                  * event coalescing.
3445                  */
3446                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3447                              0x00000001);
3448                 if (tg3_irq_sync(tp))
3449                         goto out;
3450                 sblk->status &= ~SD_STATUS_UPDATED;
3451                 if (likely(tg3_has_work(tp))) {
3452                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3453                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3454                 } else {
3455                         /* No work, shared interrupt perhaps?  re-enable
3456                          * interrupts, and flush that PCI write
3457                          */
3458                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3459                                 0x00000000);
3460                 }
3461         } else {        /* shared interrupt */
3462                 handled = 0;
3463         }
3464 out:
3465         return IRQ_RETVAL(handled);
3466 }
3467
3468 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3469 {
3470         struct net_device *dev = dev_id;
3471         struct tg3 *tp = netdev_priv(dev);
3472         struct tg3_hw_status *sblk = tp->hw_status;
3473         unsigned int handled = 1;
3474
3475         /* In INTx mode, it is possible for the interrupt to arrive at
3476          * the CPU before the status block posted prior to the interrupt.
3477          * Reading the PCI State register will confirm whether the
3478          * interrupt is ours and will flush the status block.
3479          */
3480         if ((sblk->status_tag != tp->last_tag) ||
3481             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3482                 /*
3483                  * writing any value to intr-mbox-0 clears PCI INTA# and
3484                  * chip-internal interrupt pending events.
3485                  * writing non-zero to intr-mbox-0 additional tells the
3486                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3487                  * event coalescing.
3488                  */
3489                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3490                              0x00000001);
3491                 if (tg3_irq_sync(tp))
3492                         goto out;
3493                 if (netif_rx_schedule_prep(dev)) {
3494                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3495                         /* Update last_tag to mark that this status has been
3496                          * seen. Because interrupt may be shared, we may be
3497                          * racing with tg3_poll(), so only update last_tag
3498                          * if tg3_poll() is not scheduled.
3499                          */
3500                         tp->last_tag = sblk->status_tag;
3501                         __netif_rx_schedule(dev);
3502                 }
3503         } else {        /* shared interrupt */
3504                 handled = 0;
3505         }
3506 out:
3507         return IRQ_RETVAL(handled);
3508 }
3509
3510 /* ISR for interrupt test */
3511 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3512                 struct pt_regs *regs)
3513 {
3514         struct net_device *dev = dev_id;
3515         struct tg3 *tp = netdev_priv(dev);
3516         struct tg3_hw_status *sblk = tp->hw_status;
3517
3518         if ((sblk->status & SD_STATUS_UPDATED) ||
3519             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3520                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3521                              0x00000001);
3522                 return IRQ_RETVAL(1);
3523         }
3524         return IRQ_RETVAL(0);
3525 }
3526
3527 static int tg3_init_hw(struct tg3 *);
3528 static int tg3_halt(struct tg3 *, int, int);
3529
3530 #ifdef CONFIG_NET_POLL_CONTROLLER
3531 static void tg3_poll_controller(struct net_device *dev)
3532 {
3533         struct tg3 *tp = netdev_priv(dev);
3534
3535         tg3_interrupt(tp->pdev->irq, dev, NULL);
3536 }
3537 #endif
3538
3539 static void tg3_reset_task(void *_data)
3540 {
3541         struct tg3 *tp = _data;
3542         unsigned int restart_timer;
3543
3544         tg3_full_lock(tp, 0);
3545         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3546
3547         if (!netif_running(tp->dev)) {
3548                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3549                 tg3_full_unlock(tp);
3550                 return;
3551         }
3552
3553         tg3_full_unlock(tp);
3554
3555         tg3_netif_stop(tp);
3556
3557         tg3_full_lock(tp, 1);
3558
3559         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3560         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3561
3562         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3563         tg3_init_hw(tp);
3564
3565         tg3_netif_start(tp);
3566
3567         if (restart_timer)
3568                 mod_timer(&tp->timer, jiffies + 1);
3569
3570         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3571
3572         tg3_full_unlock(tp);
3573 }
3574
3575 static void tg3_tx_timeout(struct net_device *dev)
3576 {
3577         struct tg3 *tp = netdev_priv(dev);
3578
3579         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3580                dev->name);
3581
3582         schedule_work(&tp->reset_task);
3583 }
3584
3585 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3586 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3587 {
3588         u32 base = (u32) mapping & 0xffffffff;
3589
3590         return ((base > 0xffffdcc0) &&
3591                 (base + len + 8 < base));
3592 }
3593
3594 /* Test for DMA addresses > 40-bit */
3595 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3596                                           int len)
3597 {
3598 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3599         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3600                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3601         return 0;
3602 #else
3603         return 0;
3604 #endif
3605 }
3606
3607 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3608
3609 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3610 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3611                                        u32 last_plus_one, u32 *start,
3612                                        u32 base_flags, u32 mss)
3613 {
3614         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3615         dma_addr_t new_addr = 0;
3616         u32 entry = *start;
3617         int i, ret = 0;
3618
3619         if (!new_skb) {
3620                 ret = -1;
3621         } else {
3622                 /* New SKB is guaranteed to be linear. */
3623                 entry = *start;
3624                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3625                                           PCI_DMA_TODEVICE);
3626                 /* Make sure new skb does not cross any 4G boundaries.
3627                  * Drop the packet if it does.
3628                  */
3629                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3630                         ret = -1;
3631                         dev_kfree_skb(new_skb);
3632                         new_skb = NULL;
3633                 } else {
3634                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3635                                     base_flags, 1 | (mss << 1));
3636                         *start = NEXT_TX(entry);
3637                 }
3638         }
3639
3640         /* Now clean up the sw ring entries. */
3641         i = 0;
3642         while (entry != last_plus_one) {
3643                 int len;
3644
3645                 if (i == 0)
3646                         len = skb_headlen(skb);
3647                 else
3648                         len = skb_shinfo(skb)->frags[i-1].size;
3649                 pci_unmap_single(tp->pdev,
3650                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3651                                  len, PCI_DMA_TODEVICE);
3652                 if (i == 0) {
3653                         tp->tx_buffers[entry].skb = new_skb;
3654                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3655                 } else {
3656                         tp->tx_buffers[entry].skb = NULL;
3657                 }
3658                 entry = NEXT_TX(entry);
3659                 i++;
3660         }
3661
3662         dev_kfree_skb(skb);
3663
3664         return ret;
3665 }
3666
3667 static void tg3_set_txd(struct tg3 *tp, int entry,
3668                         dma_addr_t mapping, int len, u32 flags,
3669                         u32 mss_and_is_end)
3670 {
3671         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3672         int is_end = (mss_and_is_end & 0x1);
3673         u32 mss = (mss_and_is_end >> 1);
3674         u32 vlan_tag = 0;
3675
3676         if (is_end)
3677                 flags |= TXD_FLAG_END;
3678         if (flags & TXD_FLAG_VLAN) {
3679                 vlan_tag = flags >> 16;
3680                 flags &= 0xffff;
3681         }
3682         vlan_tag |= (mss << TXD_MSS_SHIFT);
3683
3684         txd->addr_hi = ((u64) mapping >> 32);
3685         txd->addr_lo = ((u64) mapping & 0xffffffff);
3686         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3687         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3688 }
3689
3690 /* hard_start_xmit for devices that don't have any bugs and
3691  * support TG3_FLG2_HW_TSO_2 only.
3692  */
3693 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3694 {
3695         struct tg3 *tp = netdev_priv(dev);
3696         dma_addr_t mapping;
3697         u32 len, entry, base_flags, mss;
3698
3699         len = skb_headlen(skb);
3700
3701         /* No BH disabling for tx_lock here.  We are running in BH disabled
3702          * context and TX reclaim runs via tp->poll inside of a software
3703          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3704          * no IRQ context deadlocks to worry about either.  Rejoice!
3705          */
3706         if (!spin_trylock(&tp->tx_lock))
3707                 return NETDEV_TX_LOCKED;
3708
3709         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3710                 if (!netif_queue_stopped(dev)) {
3711                         netif_stop_queue(dev);
3712
3713                         /* This is a hard error, log it. */
3714                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3715                                "queue awake!\n", dev->name);
3716                 }
3717                 spin_unlock(&tp->tx_lock);
3718                 return NETDEV_TX_BUSY;
3719         }
3720
3721         entry = tp->tx_prod;
3722         base_flags = 0;
3723 #if TG3_TSO_SUPPORT != 0
3724         mss = 0;
3725         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3726             (mss = skb_shinfo(skb)->tso_size) != 0) {
3727                 int tcp_opt_len, ip_tcp_len;
3728
3729                 if (skb_header_cloned(skb) &&
3730                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3731                         dev_kfree_skb(skb);
3732                         goto out_unlock;
3733                 }
3734
3735                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3736                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3737
3738                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3739                                TXD_FLAG_CPU_POST_DMA);
3740
3741                 skb->nh.iph->check = 0;
3742                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3743
3744                 skb->h.th->check = 0;
3745
3746                 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3747         }
3748         else if (skb->ip_summed == CHECKSUM_HW)
3749                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3750 #else
3751         mss = 0;
3752         if (skb->ip_summed == CHECKSUM_HW)
3753                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3754 #endif
3755 #if TG3_VLAN_TAG_USED
3756         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3757                 base_flags |= (TXD_FLAG_VLAN |
3758                                (vlan_tx_tag_get(skb) << 16));
3759 #endif
3760
3761         /* Queue skb data, a.k.a. the main skb fragment. */
3762         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3763
3764         tp->tx_buffers[entry].skb = skb;
3765         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3766
3767         tg3_set_txd(tp, entry, mapping, len, base_flags,
3768                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3769
3770         entry = NEXT_TX(entry);
3771
3772         /* Now loop through additional data fragments, and queue them. */
3773         if (skb_shinfo(skb)->nr_frags > 0) {
3774                 unsigned int i, last;
3775
3776                 last = skb_shinfo(skb)->nr_frags - 1;
3777                 for (i = 0; i <= last; i++) {
3778                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3779
3780                         len = frag->size;
3781                         mapping = pci_map_page(tp->pdev,
3782                                                frag->page,
3783                                                frag->page_offset,
3784                                                len, PCI_DMA_TODEVICE);
3785
3786                         tp->tx_buffers[entry].skb = NULL;
3787                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3788
3789                         tg3_set_txd(tp, entry, mapping, len,
3790                                     base_flags, (i == last) | (mss << 1));
3791
3792                         entry = NEXT_TX(entry);
3793                 }
3794         }
3795
3796         /* Packets are ready, update Tx producer idx local and on card. */
3797         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3798
3799         tp->tx_prod = entry;
3800         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3801                 netif_stop_queue(dev);
3802                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3803                         netif_wake_queue(tp->dev);
3804         }
3805
3806 out_unlock:
3807         mmiowb();
3808         spin_unlock(&tp->tx_lock);
3809
3810         dev->trans_start = jiffies;
3811
3812         return NETDEV_TX_OK;
3813 }
3814
3815 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3816  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3817  */
3818 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3819 {
3820         struct tg3 *tp = netdev_priv(dev);
3821         dma_addr_t mapping;
3822         u32 len, entry, base_flags, mss;
3823         int would_hit_hwbug;
3824
3825         len = skb_headlen(skb);
3826
3827         /* No BH disabling for tx_lock here.  We are running in BH disabled
3828          * context and TX reclaim runs via tp->poll inside of a software
3829          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3830          * no IRQ context deadlocks to worry about either.  Rejoice!
3831          */
3832         if (!spin_trylock(&tp->tx_lock))
3833                 return NETDEV_TX_LOCKED; 
3834
3835         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3836                 if (!netif_queue_stopped(dev)) {
3837                         netif_stop_queue(dev);
3838
3839                         /* This is a hard error, log it. */
3840                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3841                                "queue awake!\n", dev->name);
3842                 }
3843                 spin_unlock(&tp->tx_lock);
3844                 return NETDEV_TX_BUSY;
3845         }
3846
3847         entry = tp->tx_prod;
3848         base_flags = 0;
3849         if (skb->ip_summed == CHECKSUM_HW)
3850                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3851 #if TG3_TSO_SUPPORT != 0
3852         mss = 0;
3853         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3854             (mss = skb_shinfo(skb)->tso_size) != 0) {
3855                 int tcp_opt_len, ip_tcp_len;
3856
3857                 if (skb_header_cloned(skb) &&
3858                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3859                         dev_kfree_skb(skb);
3860                         goto out_unlock;
3861                 }
3862
3863                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3864                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3865
3866                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3867                                TXD_FLAG_CPU_POST_DMA);
3868
3869                 skb->nh.iph->check = 0;
3870                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3871                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3872                         skb->h.th->check = 0;
3873                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3874                 }
3875                 else {
3876                         skb->h.th->check =
3877                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3878                                                    skb->nh.iph->daddr,
3879                                                    0, IPPROTO_TCP, 0);
3880                 }
3881
3882                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3883                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3884                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3885                                 int tsflags;
3886
3887                                 tsflags = ((skb->nh.iph->ihl - 5) +
3888                                            (tcp_opt_len >> 2));
3889                                 mss |= (tsflags << 11);
3890                         }
3891                 } else {
3892                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3893                                 int tsflags;
3894
3895                                 tsflags = ((skb->nh.iph->ihl - 5) +
3896                                            (tcp_opt_len >> 2));
3897                                 base_flags |= tsflags << 12;
3898                         }
3899                 }
3900         }
3901 #else
3902         mss = 0;
3903 #endif
3904 #if TG3_VLAN_TAG_USED
3905         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3906                 base_flags |= (TXD_FLAG_VLAN |
3907                                (vlan_tx_tag_get(skb) << 16));
3908 #endif
3909
3910         /* Queue skb data, a.k.a. the main skb fragment. */
3911         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3912
3913         tp->tx_buffers[entry].skb = skb;
3914         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3915
3916         would_hit_hwbug = 0;
3917
3918         if (tg3_4g_overflow_test(mapping, len))
3919                 would_hit_hwbug = 1;
3920
3921         tg3_set_txd(tp, entry, mapping, len, base_flags,
3922                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3923
3924         entry = NEXT_TX(entry);
3925
3926         /* Now loop through additional data fragments, and queue them. */
3927         if (skb_shinfo(skb)->nr_frags > 0) {
3928                 unsigned int i, last;
3929
3930                 last = skb_shinfo(skb)->nr_frags - 1;
3931                 for (i = 0; i <= last; i++) {
3932                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3933
3934                         len = frag->size;
3935                         mapping = pci_map_page(tp->pdev,
3936                                                frag->page,
3937                                                frag->page_offset,
3938                                                len, PCI_DMA_TODEVICE);
3939
3940                         tp->tx_buffers[entry].skb = NULL;
3941                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3942
3943                         if (tg3_4g_overflow_test(mapping, len))
3944                                 would_hit_hwbug = 1;
3945
3946                         if (tg3_40bit_overflow_test(tp, mapping, len))
3947                                 would_hit_hwbug = 1;
3948
3949                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3950                                 tg3_set_txd(tp, entry, mapping, len,
3951                                             base_flags, (i == last)|(mss << 1));
3952                         else
3953                                 tg3_set_txd(tp, entry, mapping, len,
3954                                             base_flags, (i == last));
3955
3956                         entry = NEXT_TX(entry);
3957                 }
3958         }
3959
3960         if (would_hit_hwbug) {
3961                 u32 last_plus_one = entry;
3962                 u32 start;
3963
3964                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3965                 start &= (TG3_TX_RING_SIZE - 1);
3966
3967                 /* If the workaround fails due to memory/mapping
3968                  * failure, silently drop this packet.
3969                  */
3970                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3971                                                 &start, base_flags, mss))
3972                         goto out_unlock;
3973
3974                 entry = start;
3975         }
3976
3977         /* Packets are ready, update Tx producer idx local and on card. */
3978         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3979
3980         tp->tx_prod = entry;
3981         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3982                 netif_stop_queue(dev);
3983                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3984                         netif_wake_queue(tp->dev);
3985         }
3986
3987 out_unlock:
3988         mmiowb();
3989         spin_unlock(&tp->tx_lock);
3990
3991         dev->trans_start = jiffies;
3992
3993         return NETDEV_TX_OK;
3994 }
3995
3996 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3997                                int new_mtu)
3998 {
3999         dev->mtu = new_mtu;
4000
4001         if (new_mtu > ETH_DATA_LEN) {
4002                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4003                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4004                         ethtool_op_set_tso(dev, 0);
4005                 }
4006                 else
4007                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4008         } else {
4009                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4010                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4011                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4012         }
4013 }
4014
4015 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4016 {
4017         struct tg3 *tp = netdev_priv(dev);
4018
4019         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4020                 return -EINVAL;
4021
4022         if (!netif_running(dev)) {
4023                 /* We'll just catch it later when the
4024                  * device is up'd.
4025                  */
4026                 tg3_set_mtu(dev, tp, new_mtu);
4027                 return 0;
4028         }
4029
4030         tg3_netif_stop(tp);
4031
4032         tg3_full_lock(tp, 1);
4033
4034         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4035
4036         tg3_set_mtu(dev, tp, new_mtu);
4037
4038         tg3_init_hw(tp);
4039
4040         tg3_netif_start(tp);
4041
4042         tg3_full_unlock(tp);
4043
4044         return 0;
4045 }
4046
4047 /* Free up pending packets in all rx/tx rings.
4048  *
4049  * The chip has been shut down and the driver detached from
4050  * the networking, so no interrupts or new tx packets will
4051  * end up in the driver.  tp->{tx,}lock is not held and we are not
4052  * in an interrupt context and thus may sleep.
4053  */
4054 static void tg3_free_rings(struct tg3 *tp)
4055 {
4056         struct ring_info *rxp;
4057         int i;
4058
4059         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4060                 rxp = &tp->rx_std_buffers[i];
4061
4062                 if (rxp->skb == NULL)
4063                         continue;
4064                 pci_unmap_single(tp->pdev,
4065                                  pci_unmap_addr(rxp, mapping),
4066                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4067                                  PCI_DMA_FROMDEVICE);
4068                 dev_kfree_skb_any(rxp->skb);
4069                 rxp->skb = NULL;
4070         }
4071
4072         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4073                 rxp = &tp->rx_jumbo_buffers[i];
4074
4075                 if (rxp->skb == NULL)
4076                         continue;
4077                 pci_unmap_single(tp->pdev,
4078                                  pci_unmap_addr(rxp, mapping),
4079                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4080                                  PCI_DMA_FROMDEVICE);
4081                 dev_kfree_skb_any(rxp->skb);
4082                 rxp->skb = NULL;
4083         }
4084
4085         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4086                 struct tx_ring_info *txp;
4087                 struct sk_buff *skb;
4088                 int j;
4089
4090                 txp = &tp->tx_buffers[i];
4091                 skb = txp->skb;
4092
4093                 if (skb == NULL) {
4094                         i++;
4095                         continue;
4096                 }
4097
4098                 pci_unmap_single(tp->pdev,
4099                                  pci_unmap_addr(txp, mapping),
4100                                  skb_headlen(skb),
4101                                  PCI_DMA_TODEVICE);
4102                 txp->skb = NULL;
4103
4104                 i++;
4105
4106                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4107                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4108                         pci_unmap_page(tp->pdev,
4109                                        pci_unmap_addr(txp, mapping),
4110                                        skb_shinfo(skb)->frags[j].size,
4111                                        PCI_DMA_TODEVICE);
4112                         i++;
4113                 }
4114
4115                 dev_kfree_skb_any(skb);
4116         }
4117 }
4118
4119 /* Initialize tx/rx rings for packet processing.
4120  *
4121  * The chip has been shut down and the driver detached from
4122  * the networking, so no interrupts or new tx packets will
4123  * end up in the driver.  tp->{tx,}lock are held and thus
4124  * we may not sleep.
4125  */
4126 static void tg3_init_rings(struct tg3 *tp)
4127 {
4128         u32 i;
4129
4130         /* Free up all the SKBs. */
4131         tg3_free_rings(tp);
4132
4133         /* Zero out all descriptors. */
4134         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4135         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4136         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4137         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4138
4139         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4140         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4141             (tp->dev->mtu > ETH_DATA_LEN))
4142                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4143
4144         /* Initialize invariants of the rings, we only set this
4145          * stuff once.  This works because the card does not
4146          * write into the rx buffer posting rings.
4147          */
4148         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4149                 struct tg3_rx_buffer_desc *rxd;
4150
4151                 rxd = &tp->rx_std[i];
4152                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4153                         << RXD_LEN_SHIFT;
4154                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4155                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4156                                (i << RXD_OPAQUE_INDEX_SHIFT));
4157         }
4158
4159         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4160                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4161                         struct tg3_rx_buffer_desc *rxd;
4162
4163                         rxd = &tp->rx_jumbo[i];
4164                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4165                                 << RXD_LEN_SHIFT;
4166                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4167                                 RXD_FLAG_JUMBO;
4168                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4169                                (i << RXD_OPAQUE_INDEX_SHIFT));
4170                 }
4171         }
4172
4173         /* Now allocate fresh SKBs for each rx ring. */
4174         for (i = 0; i < tp->rx_pending; i++) {
4175                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4176                                      -1, i) < 0)
4177                         break;
4178         }
4179
4180         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4181                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4182                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4183                                              -1, i) < 0)
4184                                 break;
4185                 }
4186         }
4187 }
4188
4189 /*
4190  * Must not be invoked with interrupt sources disabled and
4191  * the hardware shutdown down.
4192  */
4193 static void tg3_free_consistent(struct tg3 *tp)
4194 {
4195         kfree(tp->rx_std_buffers);
4196         tp->rx_std_buffers = NULL;
4197         if (tp->rx_std) {
4198                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4199                                     tp->rx_std, tp->rx_std_mapping);
4200                 tp->rx_std = NULL;
4201         }
4202         if (tp->rx_jumbo) {
4203                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4204                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4205                 tp->rx_jumbo = NULL;
4206         }
4207         if (tp->rx_rcb) {
4208                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4209                                     tp->rx_rcb, tp->rx_rcb_mapping);
4210                 tp->rx_rcb = NULL;
4211         }
4212         if (tp->tx_ring) {
4213                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4214                         tp->tx_ring, tp->tx_desc_mapping);
4215                 tp->tx_ring = NULL;
4216         }
4217         if (tp->hw_status) {
4218                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4219                                     tp->hw_status, tp->status_mapping);
4220                 tp->hw_status = NULL;
4221         }
4222         if (tp->hw_stats) {
4223                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4224                                     tp->hw_stats, tp->stats_mapping);
4225                 tp->hw_stats = NULL;
4226         }
4227 }
4228
4229 /*
4230  * Must not be invoked with interrupt sources disabled and
4231  * the hardware shutdown down.  Can sleep.
4232  */
4233 static int tg3_alloc_consistent(struct tg3 *tp)
4234 {
4235         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4236                                       (TG3_RX_RING_SIZE +
4237                                        TG3_RX_JUMBO_RING_SIZE)) +
4238                                      (sizeof(struct tx_ring_info) *
4239                                       TG3_TX_RING_SIZE),
4240                                      GFP_KERNEL);
4241         if (!tp->rx_std_buffers)
4242                 return -ENOMEM;
4243
4244         memset(tp->rx_std_buffers, 0,
4245                (sizeof(struct ring_info) *
4246                 (TG3_RX_RING_SIZE +
4247                  TG3_RX_JUMBO_RING_SIZE)) +
4248                (sizeof(struct tx_ring_info) *
4249                 TG3_TX_RING_SIZE));
4250
4251         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4252         tp->tx_buffers = (struct tx_ring_info *)
4253                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4254
4255         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4256                                           &tp->rx_std_mapping);
4257         if (!tp->rx_std)
4258                 goto err_out;
4259
4260         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4261                                             &tp->rx_jumbo_mapping);
4262
4263         if (!tp->rx_jumbo)
4264                 goto err_out;
4265
4266         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4267                                           &tp->rx_rcb_mapping);
4268         if (!tp->rx_rcb)
4269                 goto err_out;
4270
4271         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4272                                            &tp->tx_desc_mapping);
4273         if (!tp->tx_ring)
4274                 goto err_out;
4275
4276         tp->hw_status = pci_alloc_consistent(tp->pdev,
4277                                              TG3_HW_STATUS_SIZE,
4278                                              &tp->status_mapping);
4279         if (!tp->hw_status)
4280                 goto err_out;
4281
4282         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4283                                             sizeof(struct tg3_hw_stats),
4284                                             &tp->stats_mapping);
4285         if (!tp->hw_stats)
4286                 goto err_out;
4287
4288         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4289         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4290
4291         return 0;
4292
4293 err_out:
4294         tg3_free_consistent(tp);
4295         return -ENOMEM;
4296 }
4297
4298 #define MAX_WAIT_CNT 1000
4299
4300 /* To stop a block, clear the enable bit and poll till it
4301  * clears.  tp->lock is held.
4302  */
4303 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4304 {
4305         unsigned int i;
4306         u32 val;
4307
4308         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4309                 switch (ofs) {
4310                 case RCVLSC_MODE:
4311                 case DMAC_MODE:
4312                 case MBFREE_MODE:
4313                 case BUFMGR_MODE:
4314                 case MEMARB_MODE:
4315                         /* We can't enable/disable these bits of the
4316                          * 5705/5750, just say success.
4317                          */
4318                         return 0;
4319
4320                 default:
4321                         break;
4322                 };
4323         }
4324
4325         val = tr32(ofs);
4326         val &= ~enable_bit;
4327         tw32_f(ofs, val);
4328
4329         for (i = 0; i < MAX_WAIT_CNT; i++) {
4330                 udelay(100);
4331                 val = tr32(ofs);
4332                 if ((val & enable_bit) == 0)
4333                         break;
4334         }
4335
4336         if (i == MAX_WAIT_CNT && !silent) {
4337                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4338                        "ofs=%lx enable_bit=%x\n",
4339                        ofs, enable_bit);
4340                 return -ENODEV;
4341         }
4342
4343         return 0;
4344 }
4345
4346 /* tp->lock is held. */
4347 static int tg3_abort_hw(struct tg3 *tp, int silent)
4348 {
4349         int i, err;
4350
4351         tg3_disable_ints(tp);
4352
4353         tp->rx_mode &= ~RX_MODE_ENABLE;
4354         tw32_f(MAC_RX_MODE, tp->rx_mode);
4355         udelay(10);
4356
4357         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4358         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4359         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4360         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4361         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4362         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4363
4364         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4365         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4366         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4367         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4368         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4369         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4370         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4371
4372         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4373         tw32_f(MAC_MODE, tp->mac_mode);
4374         udelay(40);
4375
4376         tp->tx_mode &= ~TX_MODE_ENABLE;
4377         tw32_f(MAC_TX_MODE, tp->tx_mode);
4378
4379         for (i = 0; i < MAX_WAIT_CNT; i++) {
4380                 udelay(100);
4381                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4382                         break;
4383         }
4384         if (i >= MAX_WAIT_CNT) {
4385                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4386                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4387                        tp->dev->name, tr32(MAC_TX_MODE));
4388                 err |= -ENODEV;
4389         }
4390
4391         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4392         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4393         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4394
4395         tw32(FTQ_RESET, 0xffffffff);
4396         tw32(FTQ_RESET, 0x00000000);
4397
4398         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4399         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4400
4401         if (tp->hw_status)
4402                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4403         if (tp->hw_stats)
4404                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4405
4406         return err;
4407 }
4408
4409 /* tp->lock is held. */
4410 static int tg3_nvram_lock(struct tg3 *tp)
4411 {
4412         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4413                 int i;
4414
4415                 if (tp->nvram_lock_cnt == 0) {
4416                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4417                         for (i = 0; i < 8000; i++) {
4418                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4419                                         break;
4420                                 udelay(20);
4421                         }
4422                         if (i == 8000) {
4423                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4424                                 return -ENODEV;
4425                         }
4426                 }
4427                 tp->nvram_lock_cnt++;
4428         }
4429         return 0;
4430 }
4431
4432 /* tp->lock is held. */
4433 static void tg3_nvram_unlock(struct tg3 *tp)
4434 {
4435         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4436                 if (tp->nvram_lock_cnt > 0)
4437                         tp->nvram_lock_cnt--;
4438                 if (tp->nvram_lock_cnt == 0)
4439                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4440         }
4441 }
4442
4443 /* tp->lock is held. */
4444 static void tg3_enable_nvram_access(struct tg3 *tp)
4445 {
4446         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4447             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4448                 u32 nvaccess = tr32(NVRAM_ACCESS);
4449
4450                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4451         }
4452 }
4453
4454 /* tp->lock is held. */
4455 static void tg3_disable_nvram_access(struct tg3 *tp)
4456 {
4457         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4458             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4459                 u32 nvaccess = tr32(NVRAM_ACCESS);
4460
4461                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4462         }
4463 }
4464
4465 /* tp->lock is held. */
4466 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4467 {
4468         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4469                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4470                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4471
4472         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4473                 switch (kind) {
4474                 case RESET_KIND_INIT:
4475                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4476                                       DRV_STATE_START);
4477                         break;
4478
4479                 case RESET_KIND_SHUTDOWN:
4480                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4481                                       DRV_STATE_UNLOAD);
4482                         break;
4483
4484                 case RESET_KIND_SUSPEND:
4485                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4486                                       DRV_STATE_SUSPEND);
4487                         break;
4488
4489                 default:
4490                         break;
4491                 };
4492         }
4493 }
4494
4495 /* tp->lock is held. */
4496 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4497 {
4498         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4499                 switch (kind) {
4500                 case RESET_KIND_INIT:
4501                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4502                                       DRV_STATE_START_DONE);
4503                         break;
4504
4505                 case RESET_KIND_SHUTDOWN:
4506                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4507                                       DRV_STATE_UNLOAD_DONE);
4508                         break;
4509
4510                 default:
4511                         break;
4512                 };
4513         }
4514 }
4515
4516 /* tp->lock is held. */
4517 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4518 {
4519         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4520                 switch (kind) {
4521                 case RESET_KIND_INIT:
4522                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4523                                       DRV_STATE_START);
4524                         break;
4525
4526                 case RESET_KIND_SHUTDOWN:
4527                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4528                                       DRV_STATE_UNLOAD);
4529                         break;
4530
4531                 case RESET_KIND_SUSPEND:
4532                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4533                                       DRV_STATE_SUSPEND);
4534                         break;
4535
4536                 default:
4537                         break;
4538                 };
4539         }
4540 }
4541
4542 static void tg3_stop_fw(struct tg3 *);
4543
4544 /* tp->lock is held. */
4545 static int tg3_chip_reset(struct tg3 *tp)
4546 {
4547         u32 val;
4548         void (*write_op)(struct tg3 *, u32, u32);
4549         int i;
4550
4551         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4552                 tg3_nvram_lock(tp);
4553                 /* No matching tg3_nvram_unlock() after this because
4554                  * chip reset below will undo the nvram lock.
4555                  */
4556                 tp->nvram_lock_cnt = 0;
4557         }
4558
4559         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4560             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4561                 tw32(GRC_FASTBOOT_PC, 0);
4562
4563         /*
4564          * We must avoid the readl() that normally takes place.
4565          * It locks machines, causes machine checks, and other
4566          * fun things.  So, temporarily disable the 5701
4567          * hardware workaround, while we do the reset.
4568          */
4569         write_op = tp->write32;
4570         if (write_op == tg3_write_flush_reg32)
4571                 tp->write32 = tg3_write32;
4572
4573         /* do the reset */
4574         val = GRC_MISC_CFG_CORECLK_RESET;
4575
4576         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4577                 if (tr32(0x7e2c) == 0x60) {
4578                         tw32(0x7e2c, 0x20);
4579                 }
4580                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4581                         tw32(GRC_MISC_CFG, (1 << 29));
4582                         val |= (1 << 29);
4583                 }
4584         }
4585
4586         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4587                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4588         tw32(GRC_MISC_CFG, val);
4589
4590         /* restore 5701 hardware bug workaround write method */
4591         tp->write32 = write_op;
4592
4593         /* Unfortunately, we have to delay before the PCI read back.
4594          * Some 575X chips even will not respond to a PCI cfg access
4595          * when the reset command is given to the chip.
4596          *
4597          * How do these hardware designers expect things to work
4598          * properly if the PCI write is posted for a long period
4599          * of time?  It is always necessary to have some method by
4600          * which a register read back can occur to push the write
4601          * out which does the reset.
4602          *
4603          * For most tg3 variants the trick below was working.
4604          * Ho hum...
4605          */
4606         udelay(120);
4607
4608         /* Flush PCI posted writes.  The normal MMIO registers
4609          * are inaccessible at this time so this is the only
4610          * way to make this reliably (actually, this is no longer
4611          * the case, see above).  I tried to use indirect
4612          * register read/write but this upset some 5701 variants.
4613          */
4614         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4615
4616         udelay(120);
4617
4618         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4619                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4620                         int i;
4621                         u32 cfg_val;
4622
4623                         /* Wait for link training to complete.  */
4624                         for (i = 0; i < 5000; i++)
4625                                 udelay(100);
4626
4627                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4628                         pci_write_config_dword(tp->pdev, 0xc4,
4629                                                cfg_val | (1 << 15));
4630                 }
4631                 /* Set PCIE max payload size and clear error status.  */
4632                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4633         }
4634
4635         /* Re-enable indirect register accesses. */
4636         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4637                                tp->misc_host_ctrl);
4638
4639         /* Set MAX PCI retry to zero. */
4640         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4641         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4642             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4643                 val |= PCISTATE_RETRY_SAME_DMA;
4644         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4645
4646         pci_restore_state(tp->pdev);
4647
4648         /* Make sure PCI-X relaxed ordering bit is clear. */
4649         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4650         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4651         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4652
4653         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4654                 u32 val;
4655
4656                 /* Chip reset on 5780 will reset MSI enable bit,
4657                  * so need to restore it.
4658                  */
4659                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4660                         u16 ctrl;
4661
4662                         pci_read_config_word(tp->pdev,
4663                                              tp->msi_cap + PCI_MSI_FLAGS,
4664                                              &ctrl);
4665                         pci_write_config_word(tp->pdev,
4666                                               tp->msi_cap + PCI_MSI_FLAGS,
4667                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4668                         val = tr32(MSGINT_MODE);
4669                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4670                 }
4671
4672                 val = tr32(MEMARB_MODE);
4673                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4674
4675         } else
4676                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4677
4678         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4679                 tg3_stop_fw(tp);
4680                 tw32(0x5000, 0x400);
4681         }
4682
4683         tw32(GRC_MODE, tp->grc_mode);
4684
4685         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4686                 u32 val = tr32(0xc4);
4687
4688                 tw32(0xc4, val | (1 << 15));
4689         }
4690
4691         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4692             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4693                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4694                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4695                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4696                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4697         }
4698
4699         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4700                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4701                 tw32_f(MAC_MODE, tp->mac_mode);
4702         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4703                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4704                 tw32_f(MAC_MODE, tp->mac_mode);
4705         } else
4706                 tw32_f(MAC_MODE, 0);
4707         udelay(40);
4708
4709         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4710                 /* Wait for firmware initialization to complete. */
4711                 for (i = 0; i < 100000; i++) {
4712                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4713                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4714                                 break;
4715                         udelay(10);
4716                 }
4717                 if (i >= 100000) {
4718                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4719                                "firmware will not restart magic=%08x\n",
4720                                tp->dev->name, val);
4721                         return -ENODEV;
4722                 }
4723         }
4724
4725         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4726             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4727                 u32 val = tr32(0x7c00);
4728
4729                 tw32(0x7c00, val | (1 << 25));
4730         }
4731
4732         /* Reprobe ASF enable state.  */
4733         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4734         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4735         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4736         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4737                 u32 nic_cfg;
4738
4739                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4740                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4741                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4742                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4743                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4744                 }
4745         }
4746
4747         return 0;
4748 }
4749
4750 /* tp->lock is held. */
4751 static void tg3_stop_fw(struct tg3 *tp)
4752 {
4753         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4754                 u32 val;
4755                 int i;
4756
4757                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4758                 val = tr32(GRC_RX_CPU_EVENT);
4759                 val |= (1 << 14);
4760                 tw32(GRC_RX_CPU_EVENT, val);
4761
4762                 /* Wait for RX cpu to ACK the event.  */
4763                 for (i = 0; i < 100; i++) {
4764                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4765                                 break;
4766                         udelay(1);
4767                 }
4768         }
4769 }
4770
4771 /* tp->lock is held. */
4772 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4773 {
4774         int err;
4775
4776         tg3_stop_fw(tp);
4777
4778         tg3_write_sig_pre_reset(tp, kind);
4779
4780         tg3_abort_hw(tp, silent);
4781         err = tg3_chip_reset(tp);
4782
4783         tg3_write_sig_legacy(tp, kind);
4784         tg3_write_sig_post_reset(tp, kind);
4785
4786         if (err)
4787                 return err;
4788
4789         return 0;
4790 }
4791
4792 #define TG3_FW_RELEASE_MAJOR    0x0
4793 #define TG3_FW_RELASE_MINOR     0x0
4794 #define TG3_FW_RELEASE_FIX      0x0
4795 #define TG3_FW_START_ADDR       0x08000000
4796 #define TG3_FW_TEXT_ADDR        0x08000000
4797 #define TG3_FW_TEXT_LEN         0x9c0
4798 #define TG3_FW_RODATA_ADDR      0x080009c0
4799 #define TG3_FW_RODATA_LEN       0x60
4800 #define TG3_FW_DATA_ADDR        0x08000a40
4801 #define TG3_FW_DATA_LEN         0x20
4802 #define TG3_FW_SBSS_ADDR        0x08000a60
4803 #define TG3_FW_SBSS_LEN         0xc
4804 #define TG3_FW_BSS_ADDR         0x08000a70
4805 #define TG3_FW_BSS_LEN          0x10
4806
4807 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4808         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4809         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4810         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4811         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4812         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4813         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4814         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4815         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4816         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4817         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4818         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4819         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4820         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4821         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4822         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4823         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4824         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4825         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4826         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4827         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4828         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4829         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4830         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4831         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4832         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4833         0, 0, 0, 0, 0, 0,
4834         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4835         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4836         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4837         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4838         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4839         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4840         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4841         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4842         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4843         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4844         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4845         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4846         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4847         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4848         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4849         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4850         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4851         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4852         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4853         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4854         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4855         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4856         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4857         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4858         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4859         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4860         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4861         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4862         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4863         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4864         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4865         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4866         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4867         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4868         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4869         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4870         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4871         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4872         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4873         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4874         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4875         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4876         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4877         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4878         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4879         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4880         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4881         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4882         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4883         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4884         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4885         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4886         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4887         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4888         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4889         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4890         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4891         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4892         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4893         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4894         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4895         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4896         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4897         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4898         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4899 };
4900
4901 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4902         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4903         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4904         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4905         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4906         0x00000000
4907 };
4908
4909 #if 0 /* All zeros, don't eat up space with it. */
4910 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4911         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4912         0x00000000, 0x00000000, 0x00000000, 0x00000000
4913 };
4914 #endif
4915
4916 #define RX_CPU_SCRATCH_BASE     0x30000
4917 #define RX_CPU_SCRATCH_SIZE     0x04000
4918 #define TX_CPU_SCRATCH_BASE     0x34000
4919 #define TX_CPU_SCRATCH_SIZE     0x04000
4920
4921 /* tp->lock is held. */
4922 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4923 {
4924         int i;
4925
4926         if (offset == TX_CPU_BASE &&
4927             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4928                 BUG();
4929
4930         if (offset == RX_CPU_BASE) {
4931                 for (i = 0; i < 10000; i++) {
4932                         tw32(offset + CPU_STATE, 0xffffffff);
4933                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4934                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4935                                 break;
4936                 }
4937
4938                 tw32(offset + CPU_STATE, 0xffffffff);
4939                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4940                 udelay(10);
4941         } else {
4942                 for (i = 0; i < 10000; i++) {
4943                         tw32(offset + CPU_STATE, 0xffffffff);
4944                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4945                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4946                                 break;
4947                 }
4948         }
4949
4950         if (i >= 10000) {
4951                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4952                        "and %s CPU\n",
4953                        tp->dev->name,
4954                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4955                 return -ENODEV;
4956         }
4957
4958         /* Clear firmware's nvram arbitration. */
4959         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4960                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4961         return 0;
4962 }
4963
4964 struct fw_info {
4965         unsigned int text_base;
4966         unsigned int text_len;
4967         u32 *text_data;
4968         unsigned int rodata_base;
4969         unsigned int rodata_len;
4970         u32 *rodata_data;
4971         unsigned int data_base;
4972         unsigned int data_len;
4973         u32 *data_data;
4974 };
4975
4976 /* tp->lock is held. */
4977 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4978                                  int cpu_scratch_size, struct fw_info *info)
4979 {
4980         int err, lock_err, i;
4981         void (*write_op)(struct tg3 *, u32, u32);
4982
4983         if (cpu_base == TX_CPU_BASE &&
4984             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4985                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4986                        "TX cpu firmware on %s which is 5705.\n",
4987                        tp->dev->name);
4988                 return -EINVAL;
4989         }
4990
4991         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4992                 write_op = tg3_write_mem;
4993         else
4994                 write_op = tg3_write_indirect_reg32;
4995
4996         /* It is possible that bootcode is still loading at this point.
4997          * Get the nvram lock first before halting the cpu.
4998          */
4999         lock_err = tg3_nvram_lock(tp);
5000         err = tg3_halt_cpu(tp, cpu_base);
5001         if (!lock_err)
5002                 tg3_nvram_unlock(tp);
5003         if (err)
5004                 goto out;
5005
5006         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5007                 write_op(tp, cpu_scratch_base + i, 0);
5008         tw32(cpu_base + CPU_STATE, 0xffffffff);
5009         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5010         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5011                 write_op(tp, (cpu_scratch_base +
5012                               (info->text_base & 0xffff) +
5013                               (i * sizeof(u32))),
5014                          (info->text_data ?
5015                           info->text_data[i] : 0));
5016         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5017                 write_op(tp, (cpu_scratch_base +
5018                               (info->rodata_base & 0xffff) +
5019                               (i * sizeof(u32))),
5020                          (info->rodata_data ?
5021                           info->rodata_data[i] : 0));
5022         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5023                 write_op(tp, (cpu_scratch_base +
5024                               (info->data_base & 0xffff) +
5025                               (i * sizeof(u32))),
5026                          (info->data_data ?
5027                           info->data_data[i] : 0));
5028
5029         err = 0;
5030
5031 out:
5032         return err;
5033 }
5034
5035 /* tp->lock is held. */
5036 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5037 {
5038         struct fw_info info;
5039         int err, i;
5040
5041         info.text_base = TG3_FW_TEXT_ADDR;
5042         info.text_len = TG3_FW_TEXT_LEN;
5043         info.text_data = &tg3FwText[0];
5044         info.rodata_base = TG3_FW_RODATA_ADDR;
5045         info.rodata_len = TG3_FW_RODATA_LEN;
5046         info.rodata_data = &tg3FwRodata[0];
5047         info.data_base = TG3_FW_DATA_ADDR;
5048         info.data_len = TG3_FW_DATA_LEN;
5049         info.data_data = NULL;
5050
5051         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5052                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5053                                     &info);
5054         if (err)
5055                 return err;
5056
5057         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5058                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5059                                     &info);
5060         if (err)
5061                 return err;
5062
5063         /* Now startup only the RX cpu. */
5064         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5065         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5066
5067         for (i = 0; i < 5; i++) {
5068                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5069                         break;
5070                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5071                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5072                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5073                 udelay(1000);
5074         }
5075         if (i >= 5) {
5076                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5077                        "to set RX CPU PC, is %08x should be %08x\n",
5078                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5079                        TG3_FW_TEXT_ADDR);
5080                 return -ENODEV;
5081         }
5082         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5083         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5084
5085         return 0;
5086 }
5087
5088 #if TG3_TSO_SUPPORT != 0
5089
5090 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5091 #define TG3_TSO_FW_RELASE_MINOR         0x6
5092 #define TG3_TSO_FW_RELEASE_FIX          0x0
5093 #define TG3_TSO_FW_START_ADDR           0x08000000
5094 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5095 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5096 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5097 #define TG3_TSO_FW_RODATA_LEN           0x60
5098 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5099 #define TG3_TSO_FW_DATA_LEN             0x30
5100 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5101 #define TG3_TSO_FW_SBSS_LEN             0x2c
5102 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5103 #define TG3_TSO_FW_BSS_LEN              0x894
5104
5105 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5106         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5107         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5108         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5109         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5110         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5111         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5112         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5113         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5114         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5115         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5116         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5117         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5118         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5119         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5120         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5121         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5122         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5123         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5124         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5125         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5126         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5127         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5128         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5129         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5130         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5131         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5132         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5133         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5134         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5135         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5136         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5137         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5138         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5139         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5140         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5141         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5142         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5143         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5144         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5145         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5146         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5147         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5148         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5149         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5150         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5151         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5152         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5153         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5154         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5155         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5156         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5157         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5158         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5159         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5160         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5161         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5162         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5163         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5164         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5165         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5166         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5167         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5168         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5169         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5170         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5171         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5172         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5173         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5174         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5175         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5176         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5177         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5178         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5179         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5180         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5181         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5182         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5183         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5184         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5185         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5186         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5187         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5188         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5189         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5190         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5191         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5192         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5193         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5194         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5195         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5196         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5197         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5198         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5199         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5200         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5201         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5202         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5203         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5204         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5205         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5206         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5207         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5208         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5209         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5210         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5211         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5212         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5213         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5214         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5215         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5216         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5217         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5218         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5219         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5220         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5221         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5222         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5223         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5224         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5225         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5226         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5227         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5228         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5229         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5230         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5231         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5232         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5233         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5234         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5235         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5236         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5237         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5238         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5239         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5240         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5241         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5242         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5243         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5244         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5245         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5246         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5247         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5248         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5249         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5250         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5251         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5252         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5253         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5254         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5255         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5256         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5257         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5258         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5259         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5260         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5261         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5262         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5263         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5264         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5265         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5266         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5267         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5268         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5269         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5270         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5271         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5272         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5273         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5274         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5275         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5276         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5277         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5278         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5279         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5280         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5281         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5282         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5283         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5284         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5285         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5286         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5287         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5288         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5289         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5290         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5291         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5292         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5293         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5294         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5295         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5296         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5297         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5298         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5299         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5300         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5301         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5302         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5303         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5304         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5305         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5306         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5307         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5308         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5309         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5310         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5311         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5312         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5313         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5314         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5315         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5316         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5317         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5318         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5319         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5320         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5321         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5322         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5323         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5324         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5325         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5326         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5327         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5328         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5329         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5330         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5331         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5332         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5333         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5334         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5335         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5336         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5337         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5338         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5339         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5340         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5341         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5342         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5343         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5344         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5345         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5346         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5347         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5348         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5349         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5350         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5351         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5352         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5353         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5354         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5355         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5356         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5357         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5358         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5359         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5360         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5361         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5362         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5363         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5364         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5365         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5366         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5367         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5368         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5369         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5370         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5371         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5372         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5373         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5374         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5375         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5376         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5377         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5378         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5379         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5380         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5381         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5382         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5383         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5384         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5385         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5386         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5387         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5388         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5389         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5390 };
5391
5392 static u32 tg3TsoFwRodata[] = {
5393         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5394         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5395         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5396         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5397         0x00000000,
5398 };
5399
5400 static u32 tg3TsoFwData[] = {
5401         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5402         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5403         0x00000000,
5404 };
5405
5406 /* 5705 needs a special version of the TSO firmware.  */
5407 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5408 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5409 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5410 #define TG3_TSO5_FW_START_ADDR          0x00010000
5411 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5412 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5413 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5414 #define TG3_TSO5_FW_RODATA_LEN          0x50
5415 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5416 #define TG3_TSO5_FW_DATA_LEN            0x20
5417 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5418 #define TG3_TSO5_FW_SBSS_LEN            0x28
5419 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5420 #define TG3_TSO5_FW_BSS_LEN             0x88
5421
5422 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5423         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5424         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5425         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5426         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5427         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5428         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5429         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5430         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5431         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5432         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5433         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5434         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5435         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5436         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5437         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5438         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5439         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5440         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5441         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5442         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5443         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5444         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5445         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5446         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5447         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5448         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5449         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5450         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5451         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5452         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5453         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5454         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5455         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5456         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5457         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5458         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5459         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5460         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5461         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5462         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5463         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5464         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5465         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5466         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5467         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5468         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5469         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5470         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5471         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5472         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5473         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5474         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5475         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5476         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5477         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5478         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5479         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5480         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5481         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5482         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5483         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5484         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5485         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5486         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5487         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5488         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5489         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5490         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5491         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5492         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5493         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5494         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5495         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5496         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5497         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5498         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5499         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5500         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5501         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5502         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5503         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5504         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5505         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5506         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5507         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5508         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5509         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5510         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5511         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5512         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5513         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5514         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5515         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5516         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5517         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5518         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5519         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5520         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5521         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5522         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5523         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5524         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5525         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5526         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5527         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5528         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5529         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5530         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5531         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5532         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5533         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5534         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5535         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5536         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5537         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5538         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5539         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5540         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5541         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5542         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5543         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5544         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5545         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5546         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5547         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5548         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5549         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5550         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5551         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5552         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5553         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5554         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5555         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5556         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5557         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5558         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5559         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5560         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5561         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5562         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5563         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5564         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5565         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5566         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5567         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5568         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5569         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5570         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5571         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5572         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5573         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5574         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5575         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5576         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5577         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5578         0x00000000, 0x00000000, 0x00000000,
5579 };
5580
5581 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5582         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5583         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5584         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5585         0x00000000, 0x00000000, 0x00000000,
5586 };
5587
5588 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5589         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5590         0x00000000, 0x00000000, 0x00000000,
5591 };
5592
5593 /* tp->lock is held. */
5594 static int tg3_load_tso_firmware(struct tg3 *tp)
5595 {
5596         struct fw_info info;
5597         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5598         int err, i;
5599
5600         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5601                 return 0;
5602
5603         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5604                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5605                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5606                 info.text_data = &tg3Tso5FwText[0];
5607                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5608                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5609                 info.rodata_data = &tg3Tso5FwRodata[0];
5610                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5611                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5612                 info.data_data = &tg3Tso5FwData[0];
5613                 cpu_base = RX_CPU_BASE;
5614                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5615                 cpu_scratch_size = (info.text_len +
5616                                     info.rodata_len +
5617                                     info.data_len +
5618                                     TG3_TSO5_FW_SBSS_LEN +
5619                                     TG3_TSO5_FW_BSS_LEN);
5620         } else {
5621                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5622                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5623                 info.text_data = &tg3TsoFwText[0];
5624                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5625                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5626                 info.rodata_data = &tg3TsoFwRodata[0];
5627                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5628                 info.data_len = TG3_TSO_FW_DATA_LEN;
5629                 info.data_data = &tg3TsoFwData[0];
5630                 cpu_base = TX_CPU_BASE;
5631                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5632                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5633         }
5634
5635         err = tg3_load_firmware_cpu(tp, cpu_base,
5636                                     cpu_scratch_base, cpu_scratch_size,
5637                                     &info);
5638         if (err)
5639                 return err;
5640
5641         /* Now startup the cpu. */
5642         tw32(cpu_base + CPU_STATE, 0xffffffff);
5643         tw32_f(cpu_base + CPU_PC,    info.text_base);
5644
5645         for (i = 0; i < 5; i++) {
5646                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5647                         break;
5648                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5649                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5650                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5651                 udelay(1000);
5652         }
5653         if (i >= 5) {
5654                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5655                        "to set CPU PC, is %08x should be %08x\n",
5656                        tp->dev->name, tr32(cpu_base + CPU_PC),
5657                        info.text_base);
5658                 return -ENODEV;
5659         }
5660         tw32(cpu_base + CPU_STATE, 0xffffffff);
5661         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5662         return 0;
5663 }
5664
5665 #endif /* TG3_TSO_SUPPORT != 0 */
5666
5667 /* tp->lock is held. */
5668 static void __tg3_set_mac_addr(struct tg3 *tp)
5669 {
5670         u32 addr_high, addr_low;
5671         int i;
5672
5673         addr_high = ((tp->dev->dev_addr[0] << 8) |
5674                      tp->dev->dev_addr[1]);
5675         addr_low = ((tp->dev->dev_addr[2] << 24) |
5676                     (tp->dev->dev_addr[3] << 16) |
5677                     (tp->dev->dev_addr[4] <<  8) |
5678                     (tp->dev->dev_addr[5] <<  0));
5679         for (i = 0; i < 4; i++) {
5680                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5681                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5682         }
5683
5684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5686                 for (i = 0; i < 12; i++) {
5687                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5688                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5689                 }
5690         }
5691
5692         addr_high = (tp->dev->dev_addr[0] +
5693                      tp->dev->dev_addr[1] +
5694                      tp->dev->dev_addr[2] +
5695                      tp->dev->dev_addr[3] +
5696                      tp->dev->dev_addr[4] +
5697                      tp->dev->dev_addr[5]) &
5698                 TX_BACKOFF_SEED_MASK;
5699         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5700 }
5701
5702 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5703 {
5704         struct tg3 *tp = netdev_priv(dev);
5705         struct sockaddr *addr = p;
5706
5707         if (!is_valid_ether_addr(addr->sa_data))
5708                 return -EINVAL;
5709
5710         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5711
5712         if (!netif_running(dev))
5713                 return 0;
5714
5715         spin_lock_bh(&tp->lock);
5716         __tg3_set_mac_addr(tp);
5717         spin_unlock_bh(&tp->lock);
5718
5719         return 0;
5720 }
5721
5722 /* tp->lock is held. */
5723 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5724                            dma_addr_t mapping, u32 maxlen_flags,
5725                            u32 nic_addr)
5726 {
5727         tg3_write_mem(tp,
5728                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5729                       ((u64) mapping >> 32));
5730         tg3_write_mem(tp,
5731                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5732                       ((u64) mapping & 0xffffffff));
5733         tg3_write_mem(tp,
5734                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5735                        maxlen_flags);
5736
5737         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5738                 tg3_write_mem(tp,
5739                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5740                               nic_addr);
5741 }
5742
5743 static void __tg3_set_rx_mode(struct net_device *);
5744 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5745 {
5746         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5747         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5748         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5749         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5750         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5751                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5752                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5753         }
5754         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5755         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5756         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5757                 u32 val = ec->stats_block_coalesce_usecs;
5758
5759                 if (!netif_carrier_ok(tp->dev))
5760                         val = 0;
5761
5762                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5763         }
5764 }
5765
5766 /* tp->lock is held. */
5767 static int tg3_reset_hw(struct tg3 *tp)
5768 {
5769         u32 val, rdmac_mode;
5770         int i, err, limit;
5771
5772         tg3_disable_ints(tp);
5773
5774         tg3_stop_fw(tp);
5775
5776         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5777
5778         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5779                 tg3_abort_hw(tp, 1);
5780         }
5781
5782         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5783                 tg3_phy_reset(tp);
5784
5785         err = tg3_chip_reset(tp);
5786         if (err)
5787                 return err;
5788
5789         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5790
5791         /* This works around an issue with Athlon chipsets on
5792          * B3 tigon3 silicon.  This bit has no effect on any
5793          * other revision.  But do not set this on PCI Express
5794          * chips.
5795          */
5796         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5797                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5798         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5799
5800         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5801             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5802                 val = tr32(TG3PCI_PCISTATE);
5803                 val |= PCISTATE_RETRY_SAME_DMA;
5804                 tw32(TG3PCI_PCISTATE, val);
5805         }
5806
5807         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5808                 /* Enable some hw fixes.  */
5809                 val = tr32(TG3PCI_MSI_DATA);
5810                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5811                 tw32(TG3PCI_MSI_DATA, val);
5812         }
5813
5814         /* Descriptor ring init may make accesses to the
5815          * NIC SRAM area to setup the TX descriptors, so we
5816          * can only do this after the hardware has been
5817          * successfully reset.
5818          */
5819         tg3_init_rings(tp);
5820
5821         /* This value is determined during the probe time DMA
5822          * engine test, tg3_test_dma.
5823          */
5824         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5825
5826         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5827                           GRC_MODE_4X_NIC_SEND_RINGS |
5828                           GRC_MODE_NO_TX_PHDR_CSUM |
5829                           GRC_MODE_NO_RX_PHDR_CSUM);
5830         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5831         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5832                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5833         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5834                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5835
5836         tw32(GRC_MODE,
5837              tp->grc_mode |
5838              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5839
5840         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
5841         val = tr32(GRC_MISC_CFG);
5842         val &= ~0xff;
5843         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5844         tw32(GRC_MISC_CFG, val);
5845
5846         /* Initialize MBUF/DESC pool. */
5847         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5848                 /* Do nothing.  */
5849         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5850                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5851                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5852                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5853                 else
5854                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5855                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5856                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5857         }
5858 #if TG3_TSO_SUPPORT != 0
5859         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5860                 int fw_len;
5861
5862                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5863                           TG3_TSO5_FW_RODATA_LEN +
5864                           TG3_TSO5_FW_DATA_LEN +
5865                           TG3_TSO5_FW_SBSS_LEN +
5866                           TG3_TSO5_FW_BSS_LEN);
5867                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5868                 tw32(BUFMGR_MB_POOL_ADDR,
5869                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5870                 tw32(BUFMGR_MB_POOL_SIZE,
5871                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5872         }
5873 #endif
5874
5875         if (tp->dev->mtu <= ETH_DATA_LEN) {
5876                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5877                      tp->bufmgr_config.mbuf_read_dma_low_water);
5878                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5879                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5880                 tw32(BUFMGR_MB_HIGH_WATER,
5881                      tp->bufmgr_config.mbuf_high_water);
5882         } else {
5883                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5884                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5885                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5886                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5887                 tw32(BUFMGR_MB_HIGH_WATER,
5888                      tp->bufmgr_config.mbuf_high_water_jumbo);
5889         }
5890         tw32(BUFMGR_DMA_LOW_WATER,
5891              tp->bufmgr_config.dma_low_water);
5892         tw32(BUFMGR_DMA_HIGH_WATER,
5893              tp->bufmgr_config.dma_high_water);
5894
5895         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5896         for (i = 0; i < 2000; i++) {
5897                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5898                         break;
5899                 udelay(10);
5900         }
5901         if (i >= 2000) {
5902                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5903                        tp->dev->name);
5904                 return -ENODEV;
5905         }
5906
5907         /* Setup replenish threshold. */
5908         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5909
5910         /* Initialize TG3_BDINFO's at:
5911          *  RCVDBDI_STD_BD:     standard eth size rx ring
5912          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5913          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5914          *
5915          * like so:
5916          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5917          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5918          *                              ring attribute flags
5919          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5920          *
5921          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5922          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5923          *
5924          * The size of each ring is fixed in the firmware, but the location is
5925          * configurable.
5926          */
5927         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5928              ((u64) tp->rx_std_mapping >> 32));
5929         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5930              ((u64) tp->rx_std_mapping & 0xffffffff));
5931         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5932              NIC_SRAM_RX_BUFFER_DESC);
5933
5934         /* Don't even try to program the JUMBO/MINI buffer descriptor
5935          * configs on 5705.
5936          */
5937         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5938                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5939                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5940         } else {
5941                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5942                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5943
5944                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5945                      BDINFO_FLAGS_DISABLED);
5946
5947                 /* Setup replenish threshold. */
5948                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5949
5950                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5951                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5952                              ((u64) tp->rx_jumbo_mapping >> 32));
5953                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5954                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5955                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5956                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5957                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5958                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5959                 } else {
5960                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5961                              BDINFO_FLAGS_DISABLED);
5962                 }
5963
5964         }
5965
5966         /* There is only one send ring on 5705/5750, no need to explicitly
5967          * disable the others.
5968          */
5969         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5970                 /* Clear out send RCB ring in SRAM. */
5971                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5972                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5973                                       BDINFO_FLAGS_DISABLED);
5974         }
5975
5976         tp->tx_prod = 0;
5977         tp->tx_cons = 0;
5978         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5979         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5980
5981         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5982                        tp->tx_desc_mapping,
5983                        (TG3_TX_RING_SIZE <<
5984                         BDINFO_FLAGS_MAXLEN_SHIFT),
5985                        NIC_SRAM_TX_BUFFER_DESC);
5986
5987         /* There is only one receive return ring on 5705/5750, no need
5988          * to explicitly disable the others.
5989          */
5990         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5991                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5992                      i += TG3_BDINFO_SIZE) {
5993                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5994                                       BDINFO_FLAGS_DISABLED);
5995                 }
5996         }
5997
5998         tp->rx_rcb_ptr = 0;
5999         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6000
6001         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6002                        tp->rx_rcb_mapping,
6003                        (TG3_RX_RCB_RING_SIZE(tp) <<
6004                         BDINFO_FLAGS_MAXLEN_SHIFT),
6005                        0);
6006
6007         tp->rx_std_ptr = tp->rx_pending;
6008         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6009                      tp->rx_std_ptr);
6010
6011         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6012                                                 tp->rx_jumbo_pending : 0;
6013         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6014                      tp->rx_jumbo_ptr);
6015
6016         /* Initialize MAC address and backoff seed. */
6017         __tg3_set_mac_addr(tp);
6018
6019         /* MTU + ethernet header + FCS + optional VLAN tag */
6020         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6021
6022         /* The slot time is changed by tg3_setup_phy if we
6023          * run at gigabit with half duplex.
6024          */
6025         tw32(MAC_TX_LENGTHS,
6026              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6027              (6 << TX_LENGTHS_IPG_SHIFT) |
6028              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6029
6030         /* Receive rules. */
6031         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6032         tw32(RCVLPC_CONFIG, 0x0181);
6033
6034         /* Calculate RDMAC_MODE setting early, we need it to determine
6035          * the RCVLPC_STATE_ENABLE mask.
6036          */
6037         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6038                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6039                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6040                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6041                       RDMAC_MODE_LNGREAD_ENAB);
6042         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6043                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6044
6045         /* If statement applies to 5705 and 5750 PCI devices only */
6046         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6047              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6048             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6049                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6050                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6051                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6052                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6053                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6054                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6055                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6056                 }
6057         }
6058
6059         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6060                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6061
6062 #if TG3_TSO_SUPPORT != 0
6063         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6064                 rdmac_mode |= (1 << 27);
6065 #endif
6066
6067         /* Receive/send statistics. */
6068         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6069             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6070                 val = tr32(RCVLPC_STATS_ENABLE);
6071                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6072                 tw32(RCVLPC_STATS_ENABLE, val);
6073         } else {
6074                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6075         }
6076         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6077         tw32(SNDDATAI_STATSENAB, 0xffffff);
6078         tw32(SNDDATAI_STATSCTRL,
6079              (SNDDATAI_SCTRL_ENABLE |
6080               SNDDATAI_SCTRL_FASTUPD));
6081
6082         /* Setup host coalescing engine. */
6083         tw32(HOSTCC_MODE, 0);
6084         for (i = 0; i < 2000; i++) {
6085                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6086                         break;
6087                 udelay(10);
6088         }
6089
6090         __tg3_set_coalesce(tp, &tp->coal);
6091
6092         /* set status block DMA address */
6093         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6094              ((u64) tp->status_mapping >> 32));
6095         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6096              ((u64) tp->status_mapping & 0xffffffff));
6097
6098         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6099                 /* Status/statistics block address.  See tg3_timer,
6100                  * the tg3_periodic_fetch_stats call there, and
6101                  * tg3_get_stats to see how this works for 5705/5750 chips.
6102                  */
6103                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6104                      ((u64) tp->stats_mapping >> 32));
6105                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6106                      ((u64) tp->stats_mapping & 0xffffffff));
6107                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6108                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6109         }
6110
6111         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6112
6113         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6114         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6115         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6116                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6117
6118         /* Clear statistics/status block in chip, and status block in ram. */
6119         for (i = NIC_SRAM_STATS_BLK;
6120              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6121              i += sizeof(u32)) {
6122                 tg3_write_mem(tp, i, 0);
6123                 udelay(40);
6124         }
6125         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6126
6127         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6128                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6129                 /* reset to prevent losing 1st rx packet intermittently */
6130                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6131                 udelay(10);
6132         }
6133
6134         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6135                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6136         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6137         udelay(40);
6138
6139         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6140          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6141          * register to preserve the GPIO settings for LOMs. The GPIOs,
6142          * whether used as inputs or outputs, are set by boot code after
6143          * reset.
6144          */
6145         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6146                 u32 gpio_mask;
6147
6148                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6149                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6150
6151                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6152                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6153                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6154
6155                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6156
6157                 /* GPIO1 must be driven high for eeprom write protect */
6158                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6159                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6160         }
6161         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6162         udelay(100);
6163
6164         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6165         tp->last_tag = 0;
6166
6167         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6168                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6169                 udelay(40);
6170         }
6171
6172         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6173                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6174                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6175                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6176                WDMAC_MODE_LNGREAD_ENAB);
6177
6178         /* If statement applies to 5705 and 5750 PCI devices only */
6179         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6180              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6181             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6182                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6183                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6184                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6185                         /* nothing */
6186                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6187                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6188                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6189                         val |= WDMAC_MODE_RX_ACCEL;
6190                 }
6191         }
6192
6193         /* Enable host coalescing bug fix */
6194         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
6195                 val |= (1 << 29);
6196
6197         tw32_f(WDMAC_MODE, val);
6198         udelay(40);
6199
6200         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6201                 val = tr32(TG3PCI_X_CAPS);
6202                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6203                         val &= ~PCIX_CAPS_BURST_MASK;
6204                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6205                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6206                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6207                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6208                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6209                                 val |= (tp->split_mode_max_reqs <<
6210                                         PCIX_CAPS_SPLIT_SHIFT);
6211                 }
6212                 tw32(TG3PCI_X_CAPS, val);
6213         }
6214
6215         tw32_f(RDMAC_MODE, rdmac_mode);
6216         udelay(40);
6217
6218         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6219         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6220                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6221         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6222         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6223         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6224         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6225         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6226 #if TG3_TSO_SUPPORT != 0
6227         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6228                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6229 #endif
6230         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6231         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6232
6233         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6234                 err = tg3_load_5701_a0_firmware_fix(tp);
6235                 if (err)
6236                         return err;
6237         }
6238
6239 #if TG3_TSO_SUPPORT != 0
6240         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6241                 err = tg3_load_tso_firmware(tp);
6242                 if (err)
6243                         return err;
6244         }
6245 #endif
6246
6247         tp->tx_mode = TX_MODE_ENABLE;
6248         tw32_f(MAC_TX_MODE, tp->tx_mode);
6249         udelay(100);
6250
6251         tp->rx_mode = RX_MODE_ENABLE;
6252         tw32_f(MAC_RX_MODE, tp->rx_mode);
6253         udelay(10);
6254
6255         if (tp->link_config.phy_is_low_power) {
6256                 tp->link_config.phy_is_low_power = 0;
6257                 tp->link_config.speed = tp->link_config.orig_speed;
6258                 tp->link_config.duplex = tp->link_config.orig_duplex;
6259                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6260         }
6261
6262         tp->mi_mode = MAC_MI_MODE_BASE;
6263         tw32_f(MAC_MI_MODE, tp->mi_mode);
6264         udelay(80);
6265
6266         tw32(MAC_LED_CTRL, tp->led_ctrl);
6267
6268         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6269         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6270                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6271                 udelay(10);
6272         }
6273         tw32_f(MAC_RX_MODE, tp->rx_mode);
6274         udelay(10);
6275
6276         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6277                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6278                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6279                         /* Set drive transmission level to 1.2V  */
6280                         /* only if the signal pre-emphasis bit is not set  */
6281                         val = tr32(MAC_SERDES_CFG);
6282                         val &= 0xfffff000;
6283                         val |= 0x880;
6284                         tw32(MAC_SERDES_CFG, val);
6285                 }
6286                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6287                         tw32(MAC_SERDES_CFG, 0x616000);
6288         }
6289
6290         /* Prevent chip from dropping frames when flow control
6291          * is enabled.
6292          */
6293         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6294
6295         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6296             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6297                 /* Use hardware link auto-negotiation */
6298                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6299         }
6300
6301         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6302             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6303                 u32 tmp;
6304
6305                 tmp = tr32(SERDES_RX_CTRL);
6306                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6307                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6308                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6309                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6310         }
6311
6312         err = tg3_setup_phy(tp, 1);
6313         if (err)
6314                 return err;
6315
6316         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6317                 u32 tmp;
6318
6319                 /* Clear CRC stats. */
6320                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6321                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6322                         tg3_readphy(tp, 0x14, &tmp);
6323                 }
6324         }
6325
6326         __tg3_set_rx_mode(tp->dev);
6327
6328         /* Initialize receive rules. */
6329         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6330         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6331         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6332         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6333
6334         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6335             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6336                 limit = 8;
6337         else
6338                 limit = 16;
6339         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6340                 limit -= 4;
6341         switch (limit) {
6342         case 16:
6343                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6344         case 15:
6345                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6346         case 14:
6347                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6348         case 13:
6349                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6350         case 12:
6351                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6352         case 11:
6353                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6354         case 10:
6355                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6356         case 9:
6357                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6358         case 8:
6359                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6360         case 7:
6361                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6362         case 6:
6363                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6364         case 5:
6365                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6366         case 4:
6367                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6368         case 3:
6369                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6370         case 2:
6371         case 1:
6372
6373         default:
6374                 break;
6375         };
6376
6377         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6378
6379         return 0;
6380 }
6381
6382 /* Called at device open time to get the chip ready for
6383  * packet processing.  Invoked with tp->lock held.
6384  */
6385 static int tg3_init_hw(struct tg3 *tp)
6386 {
6387         int err;
6388
6389         /* Force the chip into D0. */
6390         err = tg3_set_power_state(tp, PCI_D0);
6391         if (err)
6392                 goto out;
6393
6394         tg3_switch_clocks(tp);
6395
6396         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6397
6398         err = tg3_reset_hw(tp);
6399
6400 out:
6401         return err;
6402 }
6403
6404 #define TG3_STAT_ADD32(PSTAT, REG) \
6405 do {    u32 __val = tr32(REG); \
6406         (PSTAT)->low += __val; \
6407         if ((PSTAT)->low < __val) \
6408                 (PSTAT)->high += 1; \
6409 } while (0)
6410
6411 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6412 {
6413         struct tg3_hw_stats *sp = tp->hw_stats;
6414
6415         if (!netif_carrier_ok(tp->dev))
6416                 return;
6417
6418         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6419         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6420         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6421         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6422         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6423         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6424         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6425         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6426         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6427         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6428         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6429         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6430         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6431
6432         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6433         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6434         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6435         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6436         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6437         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6438         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6439         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6440         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6441         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6442         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6443         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6444         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6445         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6446 }
6447
6448 static void tg3_timer(unsigned long __opaque)
6449 {
6450         struct tg3 *tp = (struct tg3 *) __opaque;
6451
6452         spin_lock(&tp->lock);
6453
6454         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6455                 /* All of this garbage is because when using non-tagged
6456                  * IRQ status the mailbox/status_block protocol the chip
6457                  * uses with the cpu is race prone.
6458                  */
6459                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6460                         tw32(GRC_LOCAL_CTRL,
6461                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6462                 } else {
6463                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6464                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6465                 }
6466
6467                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6468                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6469                         spin_unlock(&tp->lock);
6470                         schedule_work(&tp->reset_task);
6471                         return;
6472                 }
6473         }
6474
6475         /* This part only runs once per second. */
6476         if (!--tp->timer_counter) {
6477                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6478                         tg3_periodic_fetch_stats(tp);
6479
6480                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6481                         u32 mac_stat;
6482                         int phy_event;
6483
6484                         mac_stat = tr32(MAC_STATUS);
6485
6486                         phy_event = 0;
6487                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6488                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6489                                         phy_event = 1;
6490                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6491                                 phy_event = 1;
6492
6493                         if (phy_event)
6494                                 tg3_setup_phy(tp, 0);
6495                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6496                         u32 mac_stat = tr32(MAC_STATUS);
6497                         int need_setup = 0;
6498
6499                         if (netif_carrier_ok(tp->dev) &&
6500                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6501                                 need_setup = 1;
6502                         }
6503                         if (! netif_carrier_ok(tp->dev) &&
6504                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6505                                          MAC_STATUS_SIGNAL_DET))) {
6506                                 need_setup = 1;
6507                         }
6508                         if (need_setup) {
6509                                 tw32_f(MAC_MODE,
6510                                      (tp->mac_mode &
6511                                       ~MAC_MODE_PORT_MODE_MASK));
6512                                 udelay(40);
6513                                 tw32_f(MAC_MODE, tp->mac_mode);
6514                                 udelay(40);
6515                                 tg3_setup_phy(tp, 0);
6516                         }
6517                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6518                         tg3_serdes_parallel_detect(tp);
6519
6520                 tp->timer_counter = tp->timer_multiplier;
6521         }
6522
6523         /* Heartbeat is only sent once every 2 seconds.  */
6524         if (!--tp->asf_counter) {
6525                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6526                         u32 val;
6527
6528                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6529                                            FWCMD_NICDRV_ALIVE2);
6530                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6531                         /* 5 seconds timeout */
6532                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6533                         val = tr32(GRC_RX_CPU_EVENT);
6534                         val |= (1 << 14);
6535                         tw32(GRC_RX_CPU_EVENT, val);
6536                 }
6537                 tp->asf_counter = tp->asf_multiplier;
6538         }
6539
6540         spin_unlock(&tp->lock);
6541
6542         tp->timer.expires = jiffies + tp->timer_offset;
6543         add_timer(&tp->timer);
6544 }
6545
6546 static int tg3_request_irq(struct tg3 *tp)
6547 {
6548         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6549         unsigned long flags;
6550         struct net_device *dev = tp->dev;
6551
6552         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6553                 fn = tg3_msi;
6554                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6555                         fn = tg3_msi_1shot;
6556                 flags = SA_SAMPLE_RANDOM;
6557         } else {
6558                 fn = tg3_interrupt;
6559                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6560                         fn = tg3_interrupt_tagged;
6561                 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6562         }
6563         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6564 }
6565
6566 static int tg3_test_interrupt(struct tg3 *tp)
6567 {
6568         struct net_device *dev = tp->dev;
6569         int err, i;
6570         u32 int_mbox = 0;
6571
6572         if (!netif_running(dev))
6573                 return -ENODEV;
6574
6575         tg3_disable_ints(tp);
6576
6577         free_irq(tp->pdev->irq, dev);
6578
6579         err = request_irq(tp->pdev->irq, tg3_test_isr,
6580                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6581         if (err)
6582                 return err;
6583
6584         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6585         tg3_enable_ints(tp);
6586
6587         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6588                HOSTCC_MODE_NOW);
6589
6590         for (i = 0; i < 5; i++) {
6591                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6592                                         TG3_64BIT_REG_LOW);
6593                 if (int_mbox != 0)
6594                         break;
6595                 msleep(10);
6596         }
6597
6598         tg3_disable_ints(tp);
6599
6600         free_irq(tp->pdev->irq, dev);
6601         
6602         err = tg3_request_irq(tp);
6603
6604         if (err)
6605                 return err;
6606
6607         if (int_mbox != 0)
6608                 return 0;
6609
6610         return -EIO;
6611 }
6612
6613 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6614  * successfully restored
6615  */
6616 static int tg3_test_msi(struct tg3 *tp)
6617 {
6618         struct net_device *dev = tp->dev;
6619         int err;
6620         u16 pci_cmd;
6621
6622         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6623                 return 0;
6624
6625         /* Turn off SERR reporting in case MSI terminates with Master
6626          * Abort.
6627          */
6628         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6629         pci_write_config_word(tp->pdev, PCI_COMMAND,
6630                               pci_cmd & ~PCI_COMMAND_SERR);
6631
6632         err = tg3_test_interrupt(tp);
6633
6634         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6635
6636         if (!err)
6637                 return 0;
6638
6639         /* other failures */
6640         if (err != -EIO)
6641                 return err;
6642
6643         /* MSI test failed, go back to INTx mode */
6644         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6645                "switching to INTx mode. Please report this failure to "
6646                "the PCI maintainer and include system chipset information.\n",
6647                        tp->dev->name);
6648
6649         free_irq(tp->pdev->irq, dev);
6650         pci_disable_msi(tp->pdev);
6651
6652         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6653
6654         err = tg3_request_irq(tp);
6655         if (err)
6656                 return err;
6657
6658         /* Need to reset the chip because the MSI cycle may have terminated
6659          * with Master Abort.
6660          */
6661         tg3_full_lock(tp, 1);
6662
6663         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6664         err = tg3_init_hw(tp);
6665
6666         tg3_full_unlock(tp);
6667
6668         if (err)
6669                 free_irq(tp->pdev->irq, dev);
6670
6671         return err;
6672 }
6673
6674 static int tg3_open(struct net_device *dev)
6675 {
6676         struct tg3 *tp = netdev_priv(dev);
6677         int err;
6678
6679         tg3_full_lock(tp, 0);
6680
6681         err = tg3_set_power_state(tp, PCI_D0);
6682         if (err)
6683                 return err;
6684
6685         tg3_disable_ints(tp);
6686         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6687
6688         tg3_full_unlock(tp);
6689
6690         /* The placement of this call is tied
6691          * to the setup and use of Host TX descriptors.
6692          */
6693         err = tg3_alloc_consistent(tp);
6694         if (err)
6695                 return err;
6696
6697         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6698             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6699             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6700             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6701               (tp->pdev_peer == tp->pdev))) {
6702                 /* All MSI supporting chips should support tagged
6703                  * status.  Assert that this is the case.
6704                  */
6705                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6706                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6707                                "Not using MSI.\n", tp->dev->name);
6708                 } else if (pci_enable_msi(tp->pdev) == 0) {
6709                         u32 msi_mode;
6710
6711                         msi_mode = tr32(MSGINT_MODE);
6712                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6713                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6714                 }
6715         }
6716         err = tg3_request_irq(tp);
6717
6718         if (err) {
6719                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6720                         pci_disable_msi(tp->pdev);
6721                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6722                 }
6723                 tg3_free_consistent(tp);
6724                 return err;
6725         }
6726
6727         tg3_full_lock(tp, 0);
6728
6729         err = tg3_init_hw(tp);
6730         if (err) {
6731                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6732                 tg3_free_rings(tp);
6733         } else {
6734                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6735                         tp->timer_offset = HZ;
6736                 else
6737                         tp->timer_offset = HZ / 10;
6738
6739                 BUG_ON(tp->timer_offset > HZ);
6740                 tp->timer_counter = tp->timer_multiplier =
6741                         (HZ / tp->timer_offset);
6742                 tp->asf_counter = tp->asf_multiplier =
6743                         ((HZ / tp->timer_offset) * 2);
6744
6745                 init_timer(&tp->timer);
6746                 tp->timer.expires = jiffies + tp->timer_offset;
6747                 tp->timer.data = (unsigned long) tp;
6748                 tp->timer.function = tg3_timer;
6749         }
6750
6751         tg3_full_unlock(tp);
6752
6753         if (err) {
6754                 free_irq(tp->pdev->irq, dev);
6755                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6756                         pci_disable_msi(tp->pdev);
6757                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6758                 }
6759                 tg3_free_consistent(tp);
6760                 return err;
6761         }
6762
6763         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6764                 err = tg3_test_msi(tp);
6765
6766                 if (err) {
6767                         tg3_full_lock(tp, 0);
6768
6769                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6770                                 pci_disable_msi(tp->pdev);
6771                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6772                         }
6773                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6774                         tg3_free_rings(tp);
6775                         tg3_free_consistent(tp);
6776
6777                         tg3_full_unlock(tp);
6778
6779                         return err;
6780                 }
6781
6782                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6783                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6784                                 u32 val = tr32(0x7c04);
6785
6786                                 tw32(0x7c04, val | (1 << 29));
6787                         }
6788                 }
6789         }
6790
6791         tg3_full_lock(tp, 0);
6792
6793         add_timer(&tp->timer);
6794         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6795         tg3_enable_ints(tp);
6796
6797         tg3_full_unlock(tp);
6798
6799         netif_start_queue(dev);
6800
6801         return 0;
6802 }
6803
6804 #if 0
6805 /*static*/ void tg3_dump_state(struct tg3 *tp)
6806 {
6807         u32 val32, val32_2, val32_3, val32_4, val32_5;
6808         u16 val16;
6809         int i;
6810
6811         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6812         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6813         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6814                val16, val32);
6815
6816         /* MAC block */
6817         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6818                tr32(MAC_MODE), tr32(MAC_STATUS));
6819         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6820                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6821         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6822                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6823         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6824                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6825
6826         /* Send data initiator control block */
6827         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6828                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6829         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6830                tr32(SNDDATAI_STATSCTRL));
6831
6832         /* Send data completion control block */
6833         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6834
6835         /* Send BD ring selector block */
6836         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6837                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6838
6839         /* Send BD initiator control block */
6840         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6841                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6842
6843         /* Send BD completion control block */
6844         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6845
6846         /* Receive list placement control block */
6847         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6848                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6849         printk("       RCVLPC_STATSCTRL[%08x]\n",
6850                tr32(RCVLPC_STATSCTRL));
6851
6852         /* Receive data and receive BD initiator control block */
6853         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6854                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6855
6856         /* Receive data completion control block */
6857         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6858                tr32(RCVDCC_MODE));
6859
6860         /* Receive BD initiator control block */
6861         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6862                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6863
6864         /* Receive BD completion control block */
6865         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6866                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6867
6868         /* Receive list selector control block */
6869         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6870                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6871
6872         /* Mbuf cluster free block */
6873         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6874                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6875
6876         /* Host coalescing control block */
6877         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6878                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6879         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6880                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6881                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6882         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6883                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6884                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6885         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6886                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6887         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6888                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6889
6890         /* Memory arbiter control block */
6891         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6892                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6893
6894         /* Buffer manager control block */
6895         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6896                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6897         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6898                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6899         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6900                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6901                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6902                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6903
6904         /* Read DMA control block */
6905         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6906                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6907
6908         /* Write DMA control block */
6909         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6910                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6911
6912         /* DMA completion block */
6913         printk("DEBUG: DMAC_MODE[%08x]\n",
6914                tr32(DMAC_MODE));
6915
6916         /* GRC block */
6917         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6918                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6919         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6920                tr32(GRC_LOCAL_CTRL));
6921
6922         /* TG3_BDINFOs */
6923         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6924                tr32(RCVDBDI_JUMBO_BD + 0x0),
6925                tr32(RCVDBDI_JUMBO_BD + 0x4),
6926                tr32(RCVDBDI_JUMBO_BD + 0x8),
6927                tr32(RCVDBDI_JUMBO_BD + 0xc));
6928         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6929                tr32(RCVDBDI_STD_BD + 0x0),
6930                tr32(RCVDBDI_STD_BD + 0x4),
6931                tr32(RCVDBDI_STD_BD + 0x8),
6932                tr32(RCVDBDI_STD_BD + 0xc));
6933         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6934                tr32(RCVDBDI_MINI_BD + 0x0),
6935                tr32(RCVDBDI_MINI_BD + 0x4),
6936                tr32(RCVDBDI_MINI_BD + 0x8),
6937                tr32(RCVDBDI_MINI_BD + 0xc));
6938
6939         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6940         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6941         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6942         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6943         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6944                val32, val32_2, val32_3, val32_4);
6945
6946         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6947         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6948         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6949         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6950         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6951                val32, val32_2, val32_3, val32_4);
6952
6953         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6954         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6955         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6956         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6957         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6958         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6959                val32, val32_2, val32_3, val32_4, val32_5);
6960
6961         /* SW status block */
6962         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6963                tp->hw_status->status,
6964                tp->hw_status->status_tag,
6965                tp->hw_status->rx_jumbo_consumer,
6966                tp->hw_status->rx_consumer,
6967                tp->hw_status->rx_mini_consumer,
6968                tp->hw_status->idx[0].rx_producer,
6969                tp->hw_status->idx[0].tx_consumer);
6970
6971         /* SW statistics block */
6972         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6973                ((u32 *)tp->hw_stats)[0],
6974                ((u32 *)tp->hw_stats)[1],
6975                ((u32 *)tp->hw_stats)[2],
6976                ((u32 *)tp->hw_stats)[3]);
6977
6978         /* Mailboxes */
6979         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6980                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6981                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6982                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6983                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6984
6985         /* NIC side send descriptors. */
6986         for (i = 0; i < 6; i++) {
6987                 unsigned long txd;
6988
6989                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6990                         + (i * sizeof(struct tg3_tx_buffer_desc));
6991                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6992                        i,
6993                        readl(txd + 0x0), readl(txd + 0x4),
6994                        readl(txd + 0x8), readl(txd + 0xc));
6995         }
6996
6997         /* NIC side RX descriptors. */
6998         for (i = 0; i < 6; i++) {
6999                 unsigned long rxd;
7000
7001                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7002                         + (i * sizeof(struct tg3_rx_buffer_desc));
7003                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7004                        i,
7005                        readl(rxd + 0x0), readl(rxd + 0x4),
7006                        readl(rxd + 0x8), readl(rxd + 0xc));
7007                 rxd += (4 * sizeof(u32));
7008                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7009                        i,
7010                        readl(rxd + 0x0), readl(rxd + 0x4),
7011                        readl(rxd + 0x8), readl(rxd + 0xc));
7012         }
7013
7014         for (i = 0; i < 6; i++) {
7015                 unsigned long rxd;
7016
7017                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7018                         + (i * sizeof(struct tg3_rx_buffer_desc));
7019                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7020                        i,
7021                        readl(rxd + 0x0), readl(rxd + 0x4),
7022                        readl(rxd + 0x8), readl(rxd + 0xc));
7023                 rxd += (4 * sizeof(u32));
7024                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7025                        i,
7026                        readl(rxd + 0x0), readl(rxd + 0x4),
7027                        readl(rxd + 0x8), readl(rxd + 0xc));
7028         }
7029 }
7030 #endif
7031
7032 static struct net_device_stats *tg3_get_stats(struct net_device *);
7033 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7034
7035 static int tg3_close(struct net_device *dev)
7036 {
7037         struct tg3 *tp = netdev_priv(dev);
7038
7039         /* Calling flush_scheduled_work() may deadlock because
7040          * linkwatch_event() may be on the workqueue and it will try to get
7041          * the rtnl_lock which we are holding.
7042          */
7043         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7044                 msleep(1);
7045
7046         netif_stop_queue(dev);
7047
7048         del_timer_sync(&tp->timer);
7049
7050         tg3_full_lock(tp, 1);
7051 #if 0
7052         tg3_dump_state(tp);
7053 #endif
7054
7055         tg3_disable_ints(tp);
7056
7057         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7058         tg3_free_rings(tp);
7059         tp->tg3_flags &=
7060                 ~(TG3_FLAG_INIT_COMPLETE |
7061                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7062
7063         tg3_full_unlock(tp);
7064
7065         free_irq(tp->pdev->irq, dev);
7066         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7067                 pci_disable_msi(tp->pdev);
7068                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7069         }
7070
7071         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7072                sizeof(tp->net_stats_prev));
7073         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7074                sizeof(tp->estats_prev));
7075
7076         tg3_free_consistent(tp);
7077
7078         tg3_set_power_state(tp, PCI_D3hot);
7079
7080         netif_carrier_off(tp->dev);
7081
7082         return 0;
7083 }
7084
7085 static inline unsigned long get_stat64(tg3_stat64_t *val)
7086 {
7087         unsigned long ret;
7088
7089 #if (BITS_PER_LONG == 32)
7090         ret = val->low;
7091 #else
7092         ret = ((u64)val->high << 32) | ((u64)val->low);
7093 #endif
7094         return ret;
7095 }
7096
7097 static unsigned long calc_crc_errors(struct tg3 *tp)
7098 {
7099         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7100
7101         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7102             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7103              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7104                 u32 val;
7105
7106                 spin_lock_bh(&tp->lock);
7107                 if (!tg3_readphy(tp, 0x1e, &val)) {
7108                         tg3_writephy(tp, 0x1e, val | 0x8000);
7109                         tg3_readphy(tp, 0x14, &val);
7110                 } else
7111                         val = 0;
7112                 spin_unlock_bh(&tp->lock);
7113
7114                 tp->phy_crc_errors += val;
7115
7116                 return tp->phy_crc_errors;
7117         }
7118
7119         return get_stat64(&hw_stats->rx_fcs_errors);
7120 }
7121
7122 #define ESTAT_ADD(member) \
7123         estats->member =        old_estats->member + \
7124                                 get_stat64(&hw_stats->member)
7125
7126 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7127 {
7128         struct tg3_ethtool_stats *estats = &tp->estats;
7129         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7130         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7131
7132         if (!hw_stats)
7133                 return old_estats;
7134
7135         ESTAT_ADD(rx_octets);
7136         ESTAT_ADD(rx_fragments);
7137         ESTAT_ADD(rx_ucast_packets);
7138         ESTAT_ADD(rx_mcast_packets);
7139         ESTAT_ADD(rx_bcast_packets);
7140         ESTAT_ADD(rx_fcs_errors);
7141         ESTAT_ADD(rx_align_errors);
7142         ESTAT_ADD(rx_xon_pause_rcvd);
7143         ESTAT_ADD(rx_xoff_pause_rcvd);
7144         ESTAT_ADD(rx_mac_ctrl_rcvd);
7145         ESTAT_ADD(rx_xoff_entered);
7146         ESTAT_ADD(rx_frame_too_long_errors);
7147         ESTAT_ADD(rx_jabbers);
7148         ESTAT_ADD(rx_undersize_packets);
7149         ESTAT_ADD(rx_in_length_errors);
7150         ESTAT_ADD(rx_out_length_errors);
7151         ESTAT_ADD(rx_64_or_less_octet_packets);
7152         ESTAT_ADD(rx_65_to_127_octet_packets);
7153         ESTAT_ADD(rx_128_to_255_octet_packets);
7154         ESTAT_ADD(rx_256_to_511_octet_packets);
7155         ESTAT_ADD(rx_512_to_1023_octet_packets);
7156         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7157         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7158         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7159         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7160         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7161
7162         ESTAT_ADD(tx_octets);
7163         ESTAT_ADD(tx_collisions);
7164         ESTAT_ADD(tx_xon_sent);
7165         ESTAT_ADD(tx_xoff_sent);
7166         ESTAT_ADD(tx_flow_control);
7167         ESTAT_ADD(tx_mac_errors);
7168         ESTAT_ADD(tx_single_collisions);
7169         ESTAT_ADD(tx_mult_collisions);
7170         ESTAT_ADD(tx_deferred);
7171         ESTAT_ADD(tx_excessive_collisions);
7172         ESTAT_ADD(tx_late_collisions);
7173         ESTAT_ADD(tx_collide_2times);
7174         ESTAT_ADD(tx_collide_3times);
7175         ESTAT_ADD(tx_collide_4times);
7176         ESTAT_ADD(tx_collide_5times);
7177         ESTAT_ADD(tx_collide_6times);
7178         ESTAT_ADD(tx_collide_7times);
7179         ESTAT_ADD(tx_collide_8times);
7180         ESTAT_ADD(tx_collide_9times);
7181         ESTAT_ADD(tx_collide_10times);
7182         ESTAT_ADD(tx_collide_11times);
7183         ESTAT_ADD(tx_collide_12times);
7184         ESTAT_ADD(tx_collide_13times);
7185         ESTAT_ADD(tx_collide_14times);
7186         ESTAT_ADD(tx_collide_15times);
7187         ESTAT_ADD(tx_ucast_packets);
7188         ESTAT_ADD(tx_mcast_packets);
7189         ESTAT_ADD(tx_bcast_packets);
7190         ESTAT_ADD(tx_carrier_sense_errors);
7191         ESTAT_ADD(tx_discards);
7192         ESTAT_ADD(tx_errors);
7193
7194         ESTAT_ADD(dma_writeq_full);
7195         ESTAT_ADD(dma_write_prioq_full);
7196         ESTAT_ADD(rxbds_empty);
7197         ESTAT_ADD(rx_discards);
7198         ESTAT_ADD(rx_errors);
7199         ESTAT_ADD(rx_threshold_hit);
7200
7201         ESTAT_ADD(dma_readq_full);
7202         ESTAT_ADD(dma_read_prioq_full);
7203         ESTAT_ADD(tx_comp_queue_full);
7204
7205         ESTAT_ADD(ring_set_send_prod_index);
7206         ESTAT_ADD(ring_status_update);
7207         ESTAT_ADD(nic_irqs);
7208         ESTAT_ADD(nic_avoided_irqs);
7209         ESTAT_ADD(nic_tx_threshold_hit);
7210
7211         return estats;
7212 }
7213
7214 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7215 {
7216         struct tg3 *tp = netdev_priv(dev);
7217         struct net_device_stats *stats = &tp->net_stats;
7218         struct net_device_stats *old_stats = &tp->net_stats_prev;
7219         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7220
7221         if (!hw_stats)
7222                 return old_stats;
7223
7224         stats->rx_packets = old_stats->rx_packets +
7225                 get_stat64(&hw_stats->rx_ucast_packets) +
7226                 get_stat64(&hw_stats->rx_mcast_packets) +
7227                 get_stat64(&hw_stats->rx_bcast_packets);
7228                 
7229         stats->tx_packets = old_stats->tx_packets +
7230                 get_stat64(&hw_stats->tx_ucast_packets) +
7231                 get_stat64(&hw_stats->tx_mcast_packets) +
7232                 get_stat64(&hw_stats->tx_bcast_packets);
7233
7234         stats->rx_bytes = old_stats->rx_bytes +
7235                 get_stat64(&hw_stats->rx_octets);
7236         stats->tx_bytes = old_stats->tx_bytes +
7237                 get_stat64(&hw_stats->tx_octets);
7238
7239         stats->rx_errors = old_stats->rx_errors +
7240                 get_stat64(&hw_stats->rx_errors);
7241         stats->tx_errors = old_stats->tx_errors +
7242                 get_stat64(&hw_stats->tx_errors) +
7243                 get_stat64(&hw_stats->tx_mac_errors) +
7244                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7245                 get_stat64(&hw_stats->tx_discards);
7246
7247         stats->multicast = old_stats->multicast +
7248                 get_stat64(&hw_stats->rx_mcast_packets);
7249         stats->collisions = old_stats->collisions +
7250                 get_stat64(&hw_stats->tx_collisions);
7251
7252         stats->rx_length_errors = old_stats->rx_length_errors +
7253                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7254                 get_stat64(&hw_stats->rx_undersize_packets);
7255
7256         stats->rx_over_errors = old_stats->rx_over_errors +
7257                 get_stat64(&hw_stats->rxbds_empty);
7258         stats->rx_frame_errors = old_stats->rx_frame_errors +
7259                 get_stat64(&hw_stats->rx_align_errors);
7260         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7261                 get_stat64(&hw_stats->tx_discards);
7262         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7263                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7264
7265         stats->rx_crc_errors = old_stats->rx_crc_errors +
7266                 calc_crc_errors(tp);
7267
7268         stats->rx_missed_errors = old_stats->rx_missed_errors +
7269                 get_stat64(&hw_stats->rx_discards);
7270
7271         return stats;
7272 }
7273
7274 static inline u32 calc_crc(unsigned char *buf, int len)
7275 {
7276         u32 reg;
7277         u32 tmp;
7278         int j, k;
7279
7280         reg = 0xffffffff;
7281
7282         for (j = 0; j < len; j++) {
7283                 reg ^= buf[j];
7284
7285                 for (k = 0; k < 8; k++) {
7286                         tmp = reg & 0x01;
7287
7288                         reg >>= 1;
7289
7290                         if (tmp) {
7291                                 reg ^= 0xedb88320;
7292                         }
7293                 }
7294         }
7295
7296         return ~reg;
7297 }
7298
7299 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7300 {
7301         /* accept or reject all multicast frames */
7302         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7303         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7304         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7305         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7306 }
7307
7308 static void __tg3_set_rx_mode(struct net_device *dev)
7309 {
7310         struct tg3 *tp = netdev_priv(dev);
7311         u32 rx_mode;
7312
7313         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7314                                   RX_MODE_KEEP_VLAN_TAG);
7315
7316         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7317          * flag clear.
7318          */
7319 #if TG3_VLAN_TAG_USED
7320         if (!tp->vlgrp &&
7321             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7322                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7323 #else
7324         /* By definition, VLAN is disabled always in this
7325          * case.
7326          */
7327         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7328                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7329 #endif
7330
7331         if (dev->flags & IFF_PROMISC) {
7332                 /* Promiscuous mode. */
7333                 rx_mode |= RX_MODE_PROMISC;
7334         } else if (dev->flags & IFF_ALLMULTI) {
7335                 /* Accept all multicast. */
7336                 tg3_set_multi (tp, 1);
7337         } else if (dev->mc_count < 1) {
7338                 /* Reject all multicast. */
7339                 tg3_set_multi (tp, 0);
7340         } else {
7341                 /* Accept one or more multicast(s). */
7342                 struct dev_mc_list *mclist;
7343                 unsigned int i;
7344                 u32 mc_filter[4] = { 0, };
7345                 u32 regidx;
7346                 u32 bit;
7347                 u32 crc;
7348
7349                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7350                      i++, mclist = mclist->next) {
7351
7352                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7353                         bit = ~crc & 0x7f;
7354                         regidx = (bit & 0x60) >> 5;
7355                         bit &= 0x1f;
7356                         mc_filter[regidx] |= (1 << bit);
7357                 }
7358
7359                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7360                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7361                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7362                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7363         }
7364
7365         if (rx_mode != tp->rx_mode) {
7366                 tp->rx_mode = rx_mode;
7367                 tw32_f(MAC_RX_MODE, rx_mode);
7368                 udelay(10);
7369         }
7370 }
7371
7372 static void tg3_set_rx_mode(struct net_device *dev)
7373 {
7374         struct tg3 *tp = netdev_priv(dev);
7375
7376         if (!netif_running(dev))
7377                 return;
7378
7379         tg3_full_lock(tp, 0);
7380         __tg3_set_rx_mode(dev);
7381         tg3_full_unlock(tp);
7382 }
7383
7384 #define TG3_REGDUMP_LEN         (32 * 1024)
7385
7386 static int tg3_get_regs_len(struct net_device *dev)
7387 {
7388         return TG3_REGDUMP_LEN;
7389 }
7390
7391 static void tg3_get_regs(struct net_device *dev,
7392                 struct ethtool_regs *regs, void *_p)
7393 {
7394         u32 *p = _p;
7395         struct tg3 *tp = netdev_priv(dev);
7396         u8 *orig_p = _p;
7397         int i;
7398
7399         regs->version = 0;
7400
7401         memset(p, 0, TG3_REGDUMP_LEN);
7402
7403         if (tp->link_config.phy_is_low_power)
7404                 return;
7405
7406         tg3_full_lock(tp, 0);
7407
7408 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7409 #define GET_REG32_LOOP(base,len)                \
7410 do {    p = (u32 *)(orig_p + (base));           \
7411         for (i = 0; i < len; i += 4)            \
7412                 __GET_REG32((base) + i);        \
7413 } while (0)
7414 #define GET_REG32_1(reg)                        \
7415 do {    p = (u32 *)(orig_p + (reg));            \
7416         __GET_REG32((reg));                     \
7417 } while (0)
7418
7419         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7420         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7421         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7422         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7423         GET_REG32_1(SNDDATAC_MODE);
7424         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7425         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7426         GET_REG32_1(SNDBDC_MODE);
7427         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7428         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7429         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7430         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7431         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7432         GET_REG32_1(RCVDCC_MODE);
7433         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7434         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7435         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7436         GET_REG32_1(MBFREE_MODE);
7437         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7438         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7439         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7440         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7441         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7442         GET_REG32_1(RX_CPU_MODE);
7443         GET_REG32_1(RX_CPU_STATE);
7444         GET_REG32_1(RX_CPU_PGMCTR);
7445         GET_REG32_1(RX_CPU_HWBKPT);
7446         GET_REG32_1(TX_CPU_MODE);
7447         GET_REG32_1(TX_CPU_STATE);
7448         GET_REG32_1(TX_CPU_PGMCTR);
7449         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7450         GET_REG32_LOOP(FTQ_RESET, 0x120);
7451         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7452         GET_REG32_1(DMAC_MODE);
7453         GET_REG32_LOOP(GRC_MODE, 0x4c);
7454         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7455                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7456
7457 #undef __GET_REG32
7458 #undef GET_REG32_LOOP
7459 #undef GET_REG32_1
7460
7461         tg3_full_unlock(tp);
7462 }
7463
7464 static int tg3_get_eeprom_len(struct net_device *dev)
7465 {
7466         struct tg3 *tp = netdev_priv(dev);
7467
7468         return tp->nvram_size;
7469 }
7470
7471 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7472 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7473
7474 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7475 {
7476         struct tg3 *tp = netdev_priv(dev);
7477         int ret;
7478         u8  *pd;
7479         u32 i, offset, len, val, b_offset, b_count;
7480
7481         if (tp->link_config.phy_is_low_power)
7482                 return -EAGAIN;
7483
7484         offset = eeprom->offset;
7485         len = eeprom->len;
7486         eeprom->len = 0;
7487
7488         eeprom->magic = TG3_EEPROM_MAGIC;
7489
7490         if (offset & 3) {
7491                 /* adjustments to start on required 4 byte boundary */
7492                 b_offset = offset & 3;
7493                 b_count = 4 - b_offset;
7494                 if (b_count > len) {
7495                         /* i.e. offset=1 len=2 */
7496                         b_count = len;
7497                 }
7498                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7499                 if (ret)
7500                         return ret;
7501                 val = cpu_to_le32(val);
7502                 memcpy(data, ((char*)&val) + b_offset, b_count);
7503                 len -= b_count;
7504                 offset += b_count;
7505                 eeprom->len += b_count;
7506         }
7507
7508         /* read bytes upto the last 4 byte boundary */
7509         pd = &data[eeprom->len];
7510         for (i = 0; i < (len - (len & 3)); i += 4) {
7511                 ret = tg3_nvram_read(tp, offset + i, &val);
7512                 if (ret) {
7513                         eeprom->len += i;
7514                         return ret;
7515                 }
7516                 val = cpu_to_le32(val);
7517                 memcpy(pd + i, &val, 4);
7518         }
7519         eeprom->len += i;
7520
7521         if (len & 3) {
7522                 /* read last bytes not ending on 4 byte boundary */
7523                 pd = &data[eeprom->len];
7524                 b_count = len & 3;
7525                 b_offset = offset + len - b_count;
7526                 ret = tg3_nvram_read(tp, b_offset, &val);
7527                 if (ret)
7528                         return ret;
7529                 val = cpu_to_le32(val);
7530                 memcpy(pd, ((char*)&val), b_count);
7531                 eeprom->len += b_count;
7532         }
7533         return 0;
7534 }
7535
7536 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7537
7538 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7539 {
7540         struct tg3 *tp = netdev_priv(dev);
7541         int ret;
7542         u32 offset, len, b_offset, odd_len, start, end;
7543         u8 *buf;
7544
7545         if (tp->link_config.phy_is_low_power)
7546                 return -EAGAIN;
7547
7548         if (eeprom->magic != TG3_EEPROM_MAGIC)
7549                 return -EINVAL;
7550
7551         offset = eeprom->offset;
7552         len = eeprom->len;
7553
7554         if ((b_offset = (offset & 3))) {
7555                 /* adjustments to start on required 4 byte boundary */
7556                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7557                 if (ret)
7558                         return ret;
7559                 start = cpu_to_le32(start);
7560                 len += b_offset;
7561                 offset &= ~3;
7562                 if (len < 4)
7563                         len = 4;
7564         }
7565
7566         odd_len = 0;
7567         if (len & 3) {
7568                 /* adjustments to end on required 4 byte boundary */
7569                 odd_len = 1;
7570                 len = (len + 3) & ~3;
7571                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7572                 if (ret)
7573                         return ret;
7574                 end = cpu_to_le32(end);
7575         }
7576
7577         buf = data;
7578         if (b_offset || odd_len) {
7579                 buf = kmalloc(len, GFP_KERNEL);
7580                 if (buf == 0)
7581                         return -ENOMEM;
7582                 if (b_offset)
7583                         memcpy(buf, &start, 4);
7584                 if (odd_len)
7585                         memcpy(buf+len-4, &end, 4);
7586                 memcpy(buf + b_offset, data, eeprom->len);
7587         }
7588
7589         ret = tg3_nvram_write_block(tp, offset, len, buf);
7590
7591         if (buf != data)
7592                 kfree(buf);
7593
7594         return ret;
7595 }
7596
7597 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7598 {
7599         struct tg3 *tp = netdev_priv(dev);
7600   
7601         cmd->supported = (SUPPORTED_Autoneg);
7602
7603         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7604                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7605                                    SUPPORTED_1000baseT_Full);
7606
7607         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7608                 cmd->supported |= (SUPPORTED_100baseT_Half |
7609                                   SUPPORTED_100baseT_Full |
7610                                   SUPPORTED_10baseT_Half |
7611                                   SUPPORTED_10baseT_Full |
7612                                   SUPPORTED_MII);
7613         else
7614                 cmd->supported |= SUPPORTED_FIBRE;
7615   
7616         cmd->advertising = tp->link_config.advertising;
7617         if (netif_running(dev)) {
7618                 cmd->speed = tp->link_config.active_speed;
7619                 cmd->duplex = tp->link_config.active_duplex;
7620         }
7621         cmd->port = 0;
7622         cmd->phy_address = PHY_ADDR;
7623         cmd->transceiver = 0;
7624         cmd->autoneg = tp->link_config.autoneg;
7625         cmd->maxtxpkt = 0;
7626         cmd->maxrxpkt = 0;
7627         return 0;
7628 }
7629   
7630 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7631 {
7632         struct tg3 *tp = netdev_priv(dev);
7633   
7634         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7635                 /* These are the only valid advertisement bits allowed.  */
7636                 if (cmd->autoneg == AUTONEG_ENABLE &&
7637                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7638                                           ADVERTISED_1000baseT_Full |
7639                                           ADVERTISED_Autoneg |
7640                                           ADVERTISED_FIBRE)))
7641                         return -EINVAL;
7642                 /* Fiber can only do SPEED_1000.  */
7643                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7644                          (cmd->speed != SPEED_1000))
7645                         return -EINVAL;
7646         /* Copper cannot force SPEED_1000.  */
7647         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7648                    (cmd->speed == SPEED_1000))
7649                 return -EINVAL;
7650         else if ((cmd->speed == SPEED_1000) &&
7651                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7652                 return -EINVAL;
7653
7654         tg3_full_lock(tp, 0);
7655
7656         tp->link_config.autoneg = cmd->autoneg;
7657         if (cmd->autoneg == AUTONEG_ENABLE) {
7658                 tp->link_config.advertising = cmd->advertising;
7659                 tp->link_config.speed = SPEED_INVALID;
7660                 tp->link_config.duplex = DUPLEX_INVALID;
7661         } else {
7662                 tp->link_config.advertising = 0;
7663                 tp->link_config.speed = cmd->speed;
7664                 tp->link_config.duplex = cmd->duplex;
7665         }
7666   
7667         if (netif_running(dev))
7668                 tg3_setup_phy(tp, 1);
7669
7670         tg3_full_unlock(tp);
7671   
7672         return 0;
7673 }
7674   
7675 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7676 {
7677         struct tg3 *tp = netdev_priv(dev);
7678   
7679         strcpy(info->driver, DRV_MODULE_NAME);
7680         strcpy(info->version, DRV_MODULE_VERSION);
7681         strcpy(info->fw_version, tp->fw_ver);
7682         strcpy(info->bus_info, pci_name(tp->pdev));
7683 }
7684   
7685 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7686 {
7687         struct tg3 *tp = netdev_priv(dev);
7688   
7689         wol->supported = WAKE_MAGIC;
7690         wol->wolopts = 0;
7691         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7692                 wol->wolopts = WAKE_MAGIC;
7693         memset(&wol->sopass, 0, sizeof(wol->sopass));
7694 }
7695   
7696 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7697 {
7698         struct tg3 *tp = netdev_priv(dev);
7699   
7700         if (wol->wolopts & ~WAKE_MAGIC)
7701                 return -EINVAL;
7702         if ((wol->wolopts & WAKE_MAGIC) &&
7703             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7704             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7705                 return -EINVAL;
7706   
7707         spin_lock_bh(&tp->lock);
7708         if (wol->wolopts & WAKE_MAGIC)
7709                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7710         else
7711                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7712         spin_unlock_bh(&tp->lock);
7713   
7714         return 0;
7715 }
7716   
7717 static u32 tg3_get_msglevel(struct net_device *dev)
7718 {
7719         struct tg3 *tp = netdev_priv(dev);
7720         return tp->msg_enable;
7721 }
7722   
7723 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7724 {
7725         struct tg3 *tp = netdev_priv(dev);
7726         tp->msg_enable = value;
7727 }
7728   
7729 #if TG3_TSO_SUPPORT != 0
7730 static int tg3_set_tso(struct net_device *dev, u32 value)
7731 {
7732         struct tg3 *tp = netdev_priv(dev);
7733
7734         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7735                 if (value)
7736                         return -EINVAL;
7737                 return 0;
7738         }
7739         return ethtool_op_set_tso(dev, value);
7740 }
7741 #endif
7742   
7743 static int tg3_nway_reset(struct net_device *dev)
7744 {
7745         struct tg3 *tp = netdev_priv(dev);
7746         u32 bmcr;
7747         int r;
7748   
7749         if (!netif_running(dev))
7750                 return -EAGAIN;
7751
7752         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7753                 return -EINVAL;
7754
7755         spin_lock_bh(&tp->lock);
7756         r = -EINVAL;
7757         tg3_readphy(tp, MII_BMCR, &bmcr);
7758         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7759             ((bmcr & BMCR_ANENABLE) ||
7760              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7761                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7762                                            BMCR_ANENABLE);
7763                 r = 0;
7764         }
7765         spin_unlock_bh(&tp->lock);
7766   
7767         return r;
7768 }
7769   
7770 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7771 {
7772         struct tg3 *tp = netdev_priv(dev);
7773   
7774         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7775         ering->rx_mini_max_pending = 0;
7776         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7777                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7778         else
7779                 ering->rx_jumbo_max_pending = 0;
7780
7781         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7782
7783         ering->rx_pending = tp->rx_pending;
7784         ering->rx_mini_pending = 0;
7785         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7786                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7787         else
7788                 ering->rx_jumbo_pending = 0;
7789
7790         ering->tx_pending = tp->tx_pending;
7791 }
7792   
7793 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7794 {
7795         struct tg3 *tp = netdev_priv(dev);
7796         int irq_sync = 0;
7797   
7798         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7799             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7800             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7801                 return -EINVAL;
7802   
7803         if (netif_running(dev)) {
7804                 tg3_netif_stop(tp);
7805                 irq_sync = 1;
7806         }
7807
7808         tg3_full_lock(tp, irq_sync);
7809   
7810         tp->rx_pending = ering->rx_pending;
7811
7812         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7813             tp->rx_pending > 63)
7814                 tp->rx_pending = 63;
7815         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7816         tp->tx_pending = ering->tx_pending;
7817
7818         if (netif_running(dev)) {
7819                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7820                 tg3_init_hw(tp);
7821                 tg3_netif_start(tp);
7822         }
7823
7824         tg3_full_unlock(tp);
7825   
7826         return 0;
7827 }
7828   
7829 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7830 {
7831         struct tg3 *tp = netdev_priv(dev);
7832   
7833         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7834         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7835         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7836 }
7837   
7838 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7839 {
7840         struct tg3 *tp = netdev_priv(dev);
7841         int irq_sync = 0;
7842   
7843         if (netif_running(dev)) {
7844                 tg3_netif_stop(tp);
7845                 irq_sync = 1;
7846         }
7847
7848         tg3_full_lock(tp, irq_sync);
7849
7850         if (epause->autoneg)
7851                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7852         else
7853                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7854         if (epause->rx_pause)
7855                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7856         else
7857                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7858         if (epause->tx_pause)
7859                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7860         else
7861                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7862
7863         if (netif_running(dev)) {
7864                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7865                 tg3_init_hw(tp);
7866                 tg3_netif_start(tp);
7867         }
7868
7869         tg3_full_unlock(tp);
7870   
7871         return 0;
7872 }
7873   
7874 static u32 tg3_get_rx_csum(struct net_device *dev)
7875 {
7876         struct tg3 *tp = netdev_priv(dev);
7877         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7878 }
7879   
7880 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7881 {
7882         struct tg3 *tp = netdev_priv(dev);
7883   
7884         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7885                 if (data != 0)
7886                         return -EINVAL;
7887                 return 0;
7888         }
7889   
7890         spin_lock_bh(&tp->lock);
7891         if (data)
7892                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7893         else
7894                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7895         spin_unlock_bh(&tp->lock);
7896   
7897         return 0;
7898 }
7899   
7900 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7901 {
7902         struct tg3 *tp = netdev_priv(dev);
7903   
7904         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7905                 if (data != 0)
7906                         return -EINVAL;
7907                 return 0;
7908         }
7909   
7910         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7911                 ethtool_op_set_tx_hw_csum(dev, data);
7912         else
7913                 ethtool_op_set_tx_csum(dev, data);
7914
7915         return 0;
7916 }
7917
7918 static int tg3_get_stats_count (struct net_device *dev)
7919 {
7920         return TG3_NUM_STATS;
7921 }
7922
7923 static int tg3_get_test_count (struct net_device *dev)
7924 {
7925         return TG3_NUM_TEST;
7926 }
7927
7928 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7929 {
7930         switch (stringset) {
7931         case ETH_SS_STATS:
7932                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7933                 break;
7934         case ETH_SS_TEST:
7935                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7936                 break;
7937         default:
7938                 WARN_ON(1);     /* we need a WARN() */
7939                 break;
7940         }
7941 }
7942
7943 static int tg3_phys_id(struct net_device *dev, u32 data)
7944 {
7945         struct tg3 *tp = netdev_priv(dev);
7946         int i;
7947
7948         if (!netif_running(tp->dev))
7949                 return -EAGAIN;
7950
7951         if (data == 0)
7952                 data = 2;
7953
7954         for (i = 0; i < (data * 2); i++) {
7955                 if ((i % 2) == 0)
7956                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7957                                            LED_CTRL_1000MBPS_ON |
7958                                            LED_CTRL_100MBPS_ON |
7959                                            LED_CTRL_10MBPS_ON |
7960                                            LED_CTRL_TRAFFIC_OVERRIDE |
7961                                            LED_CTRL_TRAFFIC_BLINK |
7962                                            LED_CTRL_TRAFFIC_LED);
7963         
7964                 else
7965                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7966                                            LED_CTRL_TRAFFIC_OVERRIDE);
7967
7968                 if (msleep_interruptible(500))
7969                         break;
7970         }
7971         tw32(MAC_LED_CTRL, tp->led_ctrl);
7972         return 0;
7973 }
7974
7975 static void tg3_get_ethtool_stats (struct net_device *dev,
7976                                    struct ethtool_stats *estats, u64 *tmp_stats)
7977 {
7978         struct tg3 *tp = netdev_priv(dev);
7979         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7980 }
7981
7982 #define NVRAM_TEST_SIZE 0x100
7983 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
7984
7985 static int tg3_test_nvram(struct tg3 *tp)
7986 {
7987         u32 *buf, csum, magic;
7988         int i, j, err = 0, size;
7989
7990         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
7991                 return -EIO;
7992
7993         if (magic == TG3_EEPROM_MAGIC)
7994                 size = NVRAM_TEST_SIZE;
7995         else if ((magic & 0xff000000) == 0xa5000000) {
7996                 if ((magic & 0xe00000) == 0x200000)
7997                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
7998                 else
7999                         return 0;
8000         } else
8001                 return -EIO;
8002
8003         buf = kmalloc(size, GFP_KERNEL);
8004         if (buf == NULL)
8005                 return -ENOMEM;
8006
8007         err = -EIO;
8008         for (i = 0, j = 0; i < size; i += 4, j++) {
8009                 u32 val;
8010
8011                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8012                         break;
8013                 buf[j] = cpu_to_le32(val);
8014         }
8015         if (i < size)
8016                 goto out;
8017
8018         /* Selfboot format */
8019         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8020                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8021
8022                 for (i = 0; i < size; i++)
8023                         csum8 += buf8[i];
8024
8025                 if (csum8 == 0)
8026                         return 0;
8027                 return -EIO;
8028         }
8029
8030         /* Bootstrap checksum at offset 0x10 */
8031         csum = calc_crc((unsigned char *) buf, 0x10);
8032         if(csum != cpu_to_le32(buf[0x10/4]))
8033                 goto out;
8034
8035         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8036         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8037         if (csum != cpu_to_le32(buf[0xfc/4]))
8038                  goto out;
8039
8040         err = 0;
8041
8042 out:
8043         kfree(buf);
8044         return err;
8045 }
8046
8047 #define TG3_SERDES_TIMEOUT_SEC  2
8048 #define TG3_COPPER_TIMEOUT_SEC  6
8049
8050 static int tg3_test_link(struct tg3 *tp)
8051 {
8052         int i, max;
8053
8054         if (!netif_running(tp->dev))
8055                 return -ENODEV;
8056
8057         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8058                 max = TG3_SERDES_TIMEOUT_SEC;
8059         else
8060                 max = TG3_COPPER_TIMEOUT_SEC;
8061
8062         for (i = 0; i < max; i++) {
8063                 if (netif_carrier_ok(tp->dev))
8064                         return 0;
8065
8066                 if (msleep_interruptible(1000))
8067                         break;
8068         }
8069
8070         return -EIO;
8071 }
8072
8073 /* Only test the commonly used registers */
8074 static int tg3_test_registers(struct tg3 *tp)
8075 {
8076         int i, is_5705;
8077         u32 offset, read_mask, write_mask, val, save_val, read_val;
8078         static struct {
8079                 u16 offset;
8080                 u16 flags;
8081 #define TG3_FL_5705     0x1
8082 #define TG3_FL_NOT_5705 0x2
8083 #define TG3_FL_NOT_5788 0x4
8084                 u32 read_mask;
8085                 u32 write_mask;
8086         } reg_tbl[] = {
8087                 /* MAC Control Registers */
8088                 { MAC_MODE, TG3_FL_NOT_5705,
8089                         0x00000000, 0x00ef6f8c },
8090                 { MAC_MODE, TG3_FL_5705,
8091                         0x00000000, 0x01ef6b8c },
8092                 { MAC_STATUS, TG3_FL_NOT_5705,
8093                         0x03800107, 0x00000000 },
8094                 { MAC_STATUS, TG3_FL_5705,
8095                         0x03800100, 0x00000000 },
8096                 { MAC_ADDR_0_HIGH, 0x0000,
8097                         0x00000000, 0x0000ffff },
8098                 { MAC_ADDR_0_LOW, 0x0000,
8099                         0x00000000, 0xffffffff },
8100                 { MAC_RX_MTU_SIZE, 0x0000,
8101                         0x00000000, 0x0000ffff },
8102                 { MAC_TX_MODE, 0x0000,
8103                         0x00000000, 0x00000070 },
8104                 { MAC_TX_LENGTHS, 0x0000,
8105                         0x00000000, 0x00003fff },
8106                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8107                         0x00000000, 0x000007fc },
8108                 { MAC_RX_MODE, TG3_FL_5705,
8109                         0x00000000, 0x000007dc },
8110                 { MAC_HASH_REG_0, 0x0000,
8111                         0x00000000, 0xffffffff },
8112                 { MAC_HASH_REG_1, 0x0000,
8113                         0x00000000, 0xffffffff },
8114                 { MAC_HASH_REG_2, 0x0000,
8115                         0x00000000, 0xffffffff },
8116                 { MAC_HASH_REG_3, 0x0000,
8117                         0x00000000, 0xffffffff },
8118
8119                 /* Receive Data and Receive BD Initiator Control Registers. */
8120                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8121                         0x00000000, 0xffffffff },
8122                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8123                         0x00000000, 0xffffffff },
8124                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8125                         0x00000000, 0x00000003 },
8126                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8127                         0x00000000, 0xffffffff },
8128                 { RCVDBDI_STD_BD+0, 0x0000,
8129                         0x00000000, 0xffffffff },
8130                 { RCVDBDI_STD_BD+4, 0x0000,
8131                         0x00000000, 0xffffffff },
8132                 { RCVDBDI_STD_BD+8, 0x0000,
8133                         0x00000000, 0xffff0002 },
8134                 { RCVDBDI_STD_BD+0xc, 0x0000,
8135                         0x00000000, 0xffffffff },
8136         
8137                 /* Receive BD Initiator Control Registers. */
8138                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8139                         0x00000000, 0xffffffff },
8140                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8141                         0x00000000, 0x000003ff },
8142                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8143                         0x00000000, 0xffffffff },
8144         
8145                 /* Host Coalescing Control Registers. */
8146                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8147                         0x00000000, 0x00000004 },
8148                 { HOSTCC_MODE, TG3_FL_5705,
8149                         0x00000000, 0x000000f6 },
8150                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8151                         0x00000000, 0xffffffff },
8152                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8153                         0x00000000, 0x000003ff },
8154                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8155                         0x00000000, 0xffffffff },
8156                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8157                         0x00000000, 0x000003ff },
8158                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8159                         0x00000000, 0xffffffff },
8160                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8161                         0x00000000, 0x000000ff },
8162                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8163                         0x00000000, 0xffffffff },
8164                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8165                         0x00000000, 0x000000ff },
8166                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8167                         0x00000000, 0xffffffff },
8168                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8169                         0x00000000, 0xffffffff },
8170                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8171                         0x00000000, 0xffffffff },
8172                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8173                         0x00000000, 0x000000ff },
8174                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8175                         0x00000000, 0xffffffff },
8176                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8177                         0x00000000, 0x000000ff },
8178                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8179                         0x00000000, 0xffffffff },
8180                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8181                         0x00000000, 0xffffffff },
8182                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8183                         0x00000000, 0xffffffff },
8184                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8185                         0x00000000, 0xffffffff },
8186                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8187                         0x00000000, 0xffffffff },
8188                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8189                         0xffffffff, 0x00000000 },
8190                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8191                         0xffffffff, 0x00000000 },
8192
8193                 /* Buffer Manager Control Registers. */
8194                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8195                         0x00000000, 0x007fff80 },
8196                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8197                         0x00000000, 0x007fffff },
8198                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8199                         0x00000000, 0x0000003f },
8200                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8201                         0x00000000, 0x000001ff },
8202                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8203                         0x00000000, 0x000001ff },
8204                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8205                         0xffffffff, 0x00000000 },
8206                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8207                         0xffffffff, 0x00000000 },
8208         
8209                 /* Mailbox Registers */
8210                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8211                         0x00000000, 0x000001ff },
8212                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8213                         0x00000000, 0x000001ff },
8214                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8215                         0x00000000, 0x000007ff },
8216                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8217                         0x00000000, 0x000001ff },
8218
8219                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8220         };
8221
8222         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8223                 is_5705 = 1;
8224         else
8225                 is_5705 = 0;
8226
8227         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8228                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8229                         continue;
8230
8231                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8232                         continue;
8233
8234                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8235                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8236                         continue;
8237
8238                 offset = (u32) reg_tbl[i].offset;
8239                 read_mask = reg_tbl[i].read_mask;
8240                 write_mask = reg_tbl[i].write_mask;
8241
8242                 /* Save the original register content */
8243                 save_val = tr32(offset);
8244
8245                 /* Determine the read-only value. */
8246                 read_val = save_val & read_mask;
8247
8248                 /* Write zero to the register, then make sure the read-only bits
8249                  * are not changed and the read/write bits are all zeros.
8250                  */
8251                 tw32(offset, 0);
8252
8253                 val = tr32(offset);
8254
8255                 /* Test the read-only and read/write bits. */
8256                 if (((val & read_mask) != read_val) || (val & write_mask))
8257                         goto out;
8258
8259                 /* Write ones to all the bits defined by RdMask and WrMask, then
8260                  * make sure the read-only bits are not changed and the
8261                  * read/write bits are all ones.
8262                  */
8263                 tw32(offset, read_mask | write_mask);
8264
8265                 val = tr32(offset);
8266
8267                 /* Test the read-only bits. */
8268                 if ((val & read_mask) != read_val)
8269                         goto out;
8270
8271                 /* Test the read/write bits. */
8272                 if ((val & write_mask) != write_mask)
8273                         goto out;
8274
8275                 tw32(offset, save_val);
8276         }
8277
8278         return 0;
8279
8280 out:
8281         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8282         tw32(offset, save_val);
8283         return -EIO;
8284 }
8285
8286 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8287 {
8288         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8289         int i;
8290         u32 j;
8291
8292         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8293                 for (j = 0; j < len; j += 4) {
8294                         u32 val;
8295
8296                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8297                         tg3_read_mem(tp, offset + j, &val);
8298                         if (val != test_pattern[i])
8299                                 return -EIO;
8300                 }
8301         }
8302         return 0;
8303 }
8304
8305 static int tg3_test_memory(struct tg3 *tp)
8306 {
8307         static struct mem_entry {
8308                 u32 offset;
8309                 u32 len;
8310         } mem_tbl_570x[] = {
8311                 { 0x00000000, 0x00b50},
8312                 { 0x00002000, 0x1c000},
8313                 { 0xffffffff, 0x00000}
8314         }, mem_tbl_5705[] = {
8315                 { 0x00000100, 0x0000c},
8316                 { 0x00000200, 0x00008},
8317                 { 0x00004000, 0x00800},
8318                 { 0x00006000, 0x01000},
8319                 { 0x00008000, 0x02000},
8320                 { 0x00010000, 0x0e000},
8321                 { 0xffffffff, 0x00000}
8322         }, mem_tbl_5755[] = {
8323                 { 0x00000200, 0x00008},
8324                 { 0x00004000, 0x00800},
8325                 { 0x00006000, 0x00800},
8326                 { 0x00008000, 0x02000},
8327                 { 0x00010000, 0x0c000},
8328                 { 0xffffffff, 0x00000}
8329         };
8330         struct mem_entry *mem_tbl;
8331         int err = 0;
8332         int i;
8333
8334         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8335                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8336                         mem_tbl = mem_tbl_5755;
8337                 else
8338                         mem_tbl = mem_tbl_5705;
8339         } else
8340                 mem_tbl = mem_tbl_570x;
8341
8342         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8343                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8344                     mem_tbl[i].len)) != 0)
8345                         break;
8346         }
8347         
8348         return err;
8349 }
8350
8351 #define TG3_MAC_LOOPBACK        0
8352 #define TG3_PHY_LOOPBACK        1
8353
8354 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8355 {
8356         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8357         u32 desc_idx;
8358         struct sk_buff *skb, *rx_skb;
8359         u8 *tx_data;
8360         dma_addr_t map;
8361         int num_pkts, tx_len, rx_len, i, err;
8362         struct tg3_rx_buffer_desc *desc;
8363
8364         if (loopback_mode == TG3_MAC_LOOPBACK) {
8365                 /* HW errata - mac loopback fails in some cases on 5780.
8366                  * Normal traffic and PHY loopback are not affected by
8367                  * errata.
8368                  */
8369                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8370                         return 0;
8371
8372                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8373                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8374                            MAC_MODE_PORT_MODE_GMII;
8375                 tw32(MAC_MODE, mac_mode);
8376         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8377                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8378                                            BMCR_SPEED1000);
8379                 udelay(40);
8380                 /* reset to prevent losing 1st rx packet intermittently */
8381                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8382                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8383                         udelay(10);
8384                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8385                 }
8386                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8387                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8388                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8389                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8390                 tw32(MAC_MODE, mac_mode);
8391         }
8392         else
8393                 return -EINVAL;
8394
8395         err = -EIO;
8396
8397         tx_len = 1514;
8398         skb = dev_alloc_skb(tx_len);
8399         tx_data = skb_put(skb, tx_len);
8400         memcpy(tx_data, tp->dev->dev_addr, 6);
8401         memset(tx_data + 6, 0x0, 8);
8402
8403         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8404
8405         for (i = 14; i < tx_len; i++)
8406                 tx_data[i] = (u8) (i & 0xff);
8407
8408         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8409
8410         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8411              HOSTCC_MODE_NOW);
8412
8413         udelay(10);
8414
8415         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8416
8417         num_pkts = 0;
8418
8419         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8420
8421         tp->tx_prod++;
8422         num_pkts++;
8423
8424         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8425                      tp->tx_prod);
8426         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8427
8428         udelay(10);
8429
8430         for (i = 0; i < 10; i++) {
8431                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8432                        HOSTCC_MODE_NOW);
8433
8434                 udelay(10);
8435
8436                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8437                 rx_idx = tp->hw_status->idx[0].rx_producer;
8438                 if ((tx_idx == tp->tx_prod) &&
8439                     (rx_idx == (rx_start_idx + num_pkts)))
8440                         break;
8441         }
8442
8443         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8444         dev_kfree_skb(skb);
8445
8446         if (tx_idx != tp->tx_prod)
8447                 goto out;
8448
8449         if (rx_idx != rx_start_idx + num_pkts)
8450                 goto out;
8451
8452         desc = &tp->rx_rcb[rx_start_idx];
8453         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8454         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8455         if (opaque_key != RXD_OPAQUE_RING_STD)
8456                 goto out;
8457
8458         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8459             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8460                 goto out;
8461
8462         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8463         if (rx_len != tx_len)
8464                 goto out;
8465
8466         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8467
8468         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8469         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8470
8471         for (i = 14; i < tx_len; i++) {
8472                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8473                         goto out;
8474         }
8475         err = 0;
8476         
8477         /* tg3_free_rings will unmap and free the rx_skb */
8478 out:
8479         return err;
8480 }
8481
8482 #define TG3_MAC_LOOPBACK_FAILED         1
8483 #define TG3_PHY_LOOPBACK_FAILED         2
8484 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8485                                          TG3_PHY_LOOPBACK_FAILED)
8486
8487 static int tg3_test_loopback(struct tg3 *tp)
8488 {
8489         int err = 0;
8490
8491         if (!netif_running(tp->dev))
8492                 return TG3_LOOPBACK_FAILED;
8493
8494         tg3_reset_hw(tp);
8495
8496         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8497                 err |= TG3_MAC_LOOPBACK_FAILED;
8498         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8499                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8500                         err |= TG3_PHY_LOOPBACK_FAILED;
8501         }
8502
8503         return err;
8504 }
8505
8506 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8507                           u64 *data)
8508 {
8509         struct tg3 *tp = netdev_priv(dev);
8510
8511         if (tp->link_config.phy_is_low_power)
8512                 tg3_set_power_state(tp, PCI_D0);
8513
8514         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8515
8516         if (tg3_test_nvram(tp) != 0) {
8517                 etest->flags |= ETH_TEST_FL_FAILED;
8518                 data[0] = 1;
8519         }
8520         if (tg3_test_link(tp) != 0) {
8521                 etest->flags |= ETH_TEST_FL_FAILED;
8522                 data[1] = 1;
8523         }
8524         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8525                 int err, irq_sync = 0;
8526
8527                 if (netif_running(dev)) {
8528                         tg3_netif_stop(tp);
8529                         irq_sync = 1;
8530                 }
8531
8532                 tg3_full_lock(tp, irq_sync);
8533
8534                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8535                 err = tg3_nvram_lock(tp);
8536                 tg3_halt_cpu(tp, RX_CPU_BASE);
8537                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8538                         tg3_halt_cpu(tp, TX_CPU_BASE);
8539                 if (!err)
8540                         tg3_nvram_unlock(tp);
8541
8542                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8543                         tg3_phy_reset(tp);
8544
8545                 if (tg3_test_registers(tp) != 0) {
8546                         etest->flags |= ETH_TEST_FL_FAILED;
8547                         data[2] = 1;
8548                 }
8549                 if (tg3_test_memory(tp) != 0) {
8550                         etest->flags |= ETH_TEST_FL_FAILED;
8551                         data[3] = 1;
8552                 }
8553                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8554                         etest->flags |= ETH_TEST_FL_FAILED;
8555
8556                 tg3_full_unlock(tp);
8557
8558                 if (tg3_test_interrupt(tp) != 0) {
8559                         etest->flags |= ETH_TEST_FL_FAILED;
8560                         data[5] = 1;
8561                 }
8562
8563                 tg3_full_lock(tp, 0);
8564
8565                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8566                 if (netif_running(dev)) {
8567                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8568                         tg3_init_hw(tp);
8569                         tg3_netif_start(tp);
8570                 }
8571
8572                 tg3_full_unlock(tp);
8573         }
8574         if (tp->link_config.phy_is_low_power)
8575                 tg3_set_power_state(tp, PCI_D3hot);
8576
8577 }
8578
8579 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8580 {
8581         struct mii_ioctl_data *data = if_mii(ifr);
8582         struct tg3 *tp = netdev_priv(dev);
8583         int err;
8584
8585         switch(cmd) {
8586         case SIOCGMIIPHY:
8587                 data->phy_id = PHY_ADDR;
8588
8589                 /* fallthru */
8590         case SIOCGMIIREG: {
8591                 u32 mii_regval;
8592
8593                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8594                         break;                  /* We have no PHY */
8595
8596                 if (tp->link_config.phy_is_low_power)
8597                         return -EAGAIN;
8598
8599                 spin_lock_bh(&tp->lock);
8600                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8601                 spin_unlock_bh(&tp->lock);
8602
8603                 data->val_out = mii_regval;
8604
8605                 return err;
8606         }
8607
8608         case SIOCSMIIREG:
8609                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8610                         break;                  /* We have no PHY */
8611
8612                 if (!capable(CAP_NET_ADMIN))
8613                         return -EPERM;
8614
8615                 if (tp->link_config.phy_is_low_power)
8616                         return -EAGAIN;
8617
8618                 spin_lock_bh(&tp->lock);
8619                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8620                 spin_unlock_bh(&tp->lock);
8621
8622                 return err;
8623
8624         default:
8625                 /* do nothing */
8626                 break;
8627         }
8628         return -EOPNOTSUPP;
8629 }
8630
8631 #if TG3_VLAN_TAG_USED
8632 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8633 {
8634         struct tg3 *tp = netdev_priv(dev);
8635
8636         tg3_full_lock(tp, 0);
8637
8638         tp->vlgrp = grp;
8639
8640         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8641         __tg3_set_rx_mode(dev);
8642
8643         tg3_full_unlock(tp);
8644 }
8645
8646 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8647 {
8648         struct tg3 *tp = netdev_priv(dev);
8649
8650         tg3_full_lock(tp, 0);
8651         if (tp->vlgrp)
8652                 tp->vlgrp->vlan_devices[vid] = NULL;
8653         tg3_full_unlock(tp);
8654 }
8655 #endif
8656
8657 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8658 {
8659         struct tg3 *tp = netdev_priv(dev);
8660
8661         memcpy(ec, &tp->coal, sizeof(*ec));
8662         return 0;
8663 }
8664
8665 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8666 {
8667         struct tg3 *tp = netdev_priv(dev);
8668         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8669         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8670
8671         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8672                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8673                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8674                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8675                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8676         }
8677
8678         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8679             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8680             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8681             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8682             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8683             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8684             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8685             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8686             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8687             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8688                 return -EINVAL;
8689
8690         /* No rx interrupts will be generated if both are zero */
8691         if ((ec->rx_coalesce_usecs == 0) &&
8692             (ec->rx_max_coalesced_frames == 0))
8693                 return -EINVAL;
8694
8695         /* No tx interrupts will be generated if both are zero */
8696         if ((ec->tx_coalesce_usecs == 0) &&
8697             (ec->tx_max_coalesced_frames == 0))
8698                 return -EINVAL;
8699
8700         /* Only copy relevant parameters, ignore all others. */
8701         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8702         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8703         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8704         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8705         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8706         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8707         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8708         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8709         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8710
8711         if (netif_running(dev)) {
8712                 tg3_full_lock(tp, 0);
8713                 __tg3_set_coalesce(tp, &tp->coal);
8714                 tg3_full_unlock(tp);
8715         }
8716         return 0;
8717 }
8718
8719 static struct ethtool_ops tg3_ethtool_ops = {
8720         .get_settings           = tg3_get_settings,
8721         .set_settings           = tg3_set_settings,
8722         .get_drvinfo            = tg3_get_drvinfo,
8723         .get_regs_len           = tg3_get_regs_len,
8724         .get_regs               = tg3_get_regs,
8725         .get_wol                = tg3_get_wol,
8726         .set_wol                = tg3_set_wol,
8727         .get_msglevel           = tg3_get_msglevel,
8728         .set_msglevel           = tg3_set_msglevel,
8729         .nway_reset             = tg3_nway_reset,
8730         .get_link               = ethtool_op_get_link,
8731         .get_eeprom_len         = tg3_get_eeprom_len,
8732         .get_eeprom             = tg3_get_eeprom,
8733         .set_eeprom             = tg3_set_eeprom,
8734         .get_ringparam          = tg3_get_ringparam,
8735         .set_ringparam          = tg3_set_ringparam,
8736         .get_pauseparam         = tg3_get_pauseparam,
8737         .set_pauseparam         = tg3_set_pauseparam,
8738         .get_rx_csum            = tg3_get_rx_csum,
8739         .set_rx_csum            = tg3_set_rx_csum,
8740         .get_tx_csum            = ethtool_op_get_tx_csum,
8741         .set_tx_csum            = tg3_set_tx_csum,
8742         .get_sg                 = ethtool_op_get_sg,
8743         .set_sg                 = ethtool_op_set_sg,
8744 #if TG3_TSO_SUPPORT != 0
8745         .get_tso                = ethtool_op_get_tso,
8746         .set_tso                = tg3_set_tso,
8747 #endif
8748         .self_test_count        = tg3_get_test_count,
8749         .self_test              = tg3_self_test,
8750         .get_strings            = tg3_get_strings,
8751         .phys_id                = tg3_phys_id,
8752         .get_stats_count        = tg3_get_stats_count,
8753         .get_ethtool_stats      = tg3_get_ethtool_stats,
8754         .get_coalesce           = tg3_get_coalesce,
8755         .set_coalesce           = tg3_set_coalesce,
8756         .get_perm_addr          = ethtool_op_get_perm_addr,
8757 };
8758
8759 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8760 {
8761         u32 cursize, val, magic;
8762
8763         tp->nvram_size = EEPROM_CHIP_SIZE;
8764
8765         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8766                 return;
8767
8768         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8769                 return;
8770
8771         /*
8772          * Size the chip by reading offsets at increasing powers of two.
8773          * When we encounter our validation signature, we know the addressing
8774          * has wrapped around, and thus have our chip size.
8775          */
8776         cursize = 0x10;
8777
8778         while (cursize < tp->nvram_size) {
8779                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8780                         return;
8781
8782                 if (val == magic)
8783                         break;
8784
8785                 cursize <<= 1;
8786         }
8787
8788         tp->nvram_size = cursize;
8789 }
8790                 
8791 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8792 {
8793         u32 val;
8794
8795         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8796                 return;
8797
8798         /* Selfboot format */
8799         if (val != TG3_EEPROM_MAGIC) {
8800                 tg3_get_eeprom_size(tp);
8801                 return;
8802         }
8803
8804         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8805                 if (val != 0) {
8806                         tp->nvram_size = (val >> 16) * 1024;
8807                         return;
8808                 }
8809         }
8810         tp->nvram_size = 0x20000;
8811 }
8812
8813 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8814 {
8815         u32 nvcfg1;
8816
8817         nvcfg1 = tr32(NVRAM_CFG1);
8818         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8819                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8820         }
8821         else {
8822                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8823                 tw32(NVRAM_CFG1, nvcfg1);
8824         }
8825
8826         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8827             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8828                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8829                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8830                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8831                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8832                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8833                                 break;
8834                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8835                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8836                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8837                                 break;
8838                         case FLASH_VENDOR_ATMEL_EEPROM:
8839                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8840                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8841                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8842                                 break;
8843                         case FLASH_VENDOR_ST:
8844                                 tp->nvram_jedecnum = JEDEC_ST;
8845                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8846                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8847                                 break;
8848                         case FLASH_VENDOR_SAIFUN:
8849                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8850                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8851                                 break;
8852                         case FLASH_VENDOR_SST_SMALL:
8853                         case FLASH_VENDOR_SST_LARGE:
8854                                 tp->nvram_jedecnum = JEDEC_SST;
8855                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8856                                 break;
8857                 }
8858         }
8859         else {
8860                 tp->nvram_jedecnum = JEDEC_ATMEL;
8861                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8862                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8863         }
8864 }
8865
8866 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8867 {
8868         u32 nvcfg1;
8869
8870         nvcfg1 = tr32(NVRAM_CFG1);
8871
8872         /* NVRAM protection for TPM */
8873         if (nvcfg1 & (1 << 27))
8874                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8875
8876         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8877                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8878                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8879                         tp->nvram_jedecnum = JEDEC_ATMEL;
8880                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8881                         break;
8882                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8883                         tp->nvram_jedecnum = JEDEC_ATMEL;
8884                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8885                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8886                         break;
8887                 case FLASH_5752VENDOR_ST_M45PE10:
8888                 case FLASH_5752VENDOR_ST_M45PE20:
8889                 case FLASH_5752VENDOR_ST_M45PE40:
8890                         tp->nvram_jedecnum = JEDEC_ST;
8891                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8892                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8893                         break;
8894         }
8895
8896         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8897                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8898                         case FLASH_5752PAGE_SIZE_256:
8899                                 tp->nvram_pagesize = 256;
8900                                 break;
8901                         case FLASH_5752PAGE_SIZE_512:
8902                                 tp->nvram_pagesize = 512;
8903                                 break;
8904                         case FLASH_5752PAGE_SIZE_1K:
8905                                 tp->nvram_pagesize = 1024;
8906                                 break;
8907                         case FLASH_5752PAGE_SIZE_2K:
8908                                 tp->nvram_pagesize = 2048;
8909                                 break;
8910                         case FLASH_5752PAGE_SIZE_4K:
8911                                 tp->nvram_pagesize = 4096;
8912                                 break;
8913                         case FLASH_5752PAGE_SIZE_264:
8914                                 tp->nvram_pagesize = 264;
8915                                 break;
8916                 }
8917         }
8918         else {
8919                 /* For eeprom, set pagesize to maximum eeprom size */
8920                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8921
8922                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8923                 tw32(NVRAM_CFG1, nvcfg1);
8924         }
8925 }
8926
8927 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8928 {
8929         u32 nvcfg1;
8930
8931         nvcfg1 = tr32(NVRAM_CFG1);
8932
8933         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8934                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
8935                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
8936                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
8937                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
8938                         tp->nvram_jedecnum = JEDEC_ATMEL;
8939                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8940                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8941
8942                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8943                         tw32(NVRAM_CFG1, nvcfg1);
8944                         break;
8945                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8946                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8947                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8948                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8949                         tp->nvram_jedecnum = JEDEC_ATMEL;
8950                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8951                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8952                         tp->nvram_pagesize = 264;
8953                         break;
8954                 case FLASH_5752VENDOR_ST_M45PE10:
8955                 case FLASH_5752VENDOR_ST_M45PE20:
8956                 case FLASH_5752VENDOR_ST_M45PE40:
8957                         tp->nvram_jedecnum = JEDEC_ST;
8958                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8959                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8960                         tp->nvram_pagesize = 256;
8961                         break;
8962         }
8963 }
8964
8965 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8966 static void __devinit tg3_nvram_init(struct tg3 *tp)
8967 {
8968         int j;
8969
8970         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8971                 return;
8972
8973         tw32_f(GRC_EEPROM_ADDR,
8974              (EEPROM_ADDR_FSM_RESET |
8975               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8976                EEPROM_ADDR_CLKPERD_SHIFT)));
8977
8978         /* XXX schedule_timeout() ... */
8979         for (j = 0; j < 100; j++)
8980                 udelay(10);
8981
8982         /* Enable seeprom accesses. */
8983         tw32_f(GRC_LOCAL_CTRL,
8984              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8985         udelay(100);
8986
8987         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8988             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8989                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8990
8991                 if (tg3_nvram_lock(tp)) {
8992                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
8993                                "tg3_nvram_init failed.\n", tp->dev->name);
8994                         return;
8995                 }
8996                 tg3_enable_nvram_access(tp);
8997
8998                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8999                         tg3_get_5752_nvram_info(tp);
9000                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9001                         tg3_get_5787_nvram_info(tp);
9002                 else
9003                         tg3_get_nvram_info(tp);
9004
9005                 tg3_get_nvram_size(tp);
9006
9007                 tg3_disable_nvram_access(tp);
9008                 tg3_nvram_unlock(tp);
9009
9010         } else {
9011                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9012
9013                 tg3_get_eeprom_size(tp);
9014         }
9015 }
9016
9017 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9018                                         u32 offset, u32 *val)
9019 {
9020         u32 tmp;
9021         int i;
9022
9023         if (offset > EEPROM_ADDR_ADDR_MASK ||
9024             (offset % 4) != 0)
9025                 return -EINVAL;
9026
9027         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9028                                         EEPROM_ADDR_DEVID_MASK |
9029                                         EEPROM_ADDR_READ);
9030         tw32(GRC_EEPROM_ADDR,
9031              tmp |
9032              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9033              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9034               EEPROM_ADDR_ADDR_MASK) |
9035              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9036
9037         for (i = 0; i < 10000; i++) {
9038                 tmp = tr32(GRC_EEPROM_ADDR);
9039
9040                 if (tmp & EEPROM_ADDR_COMPLETE)
9041                         break;
9042                 udelay(100);
9043         }
9044         if (!(tmp & EEPROM_ADDR_COMPLETE))
9045                 return -EBUSY;
9046
9047         *val = tr32(GRC_EEPROM_DATA);
9048         return 0;
9049 }
9050
9051 #define NVRAM_CMD_TIMEOUT 10000
9052
9053 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9054 {
9055         int i;
9056
9057         tw32(NVRAM_CMD, nvram_cmd);
9058         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9059                 udelay(10);
9060                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9061                         udelay(10);
9062                         break;
9063                 }
9064         }
9065         if (i == NVRAM_CMD_TIMEOUT) {
9066                 return -EBUSY;
9067         }
9068         return 0;
9069 }
9070
9071 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9072 {
9073         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9074             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9075             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9076             (tp->nvram_jedecnum == JEDEC_ATMEL))
9077
9078                 addr = ((addr / tp->nvram_pagesize) <<
9079                         ATMEL_AT45DB0X1B_PAGE_POS) +
9080                        (addr % tp->nvram_pagesize);
9081
9082         return addr;
9083 }
9084
9085 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9086 {
9087         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9088             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9089             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9090             (tp->nvram_jedecnum == JEDEC_ATMEL))
9091
9092                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9093                         tp->nvram_pagesize) +
9094                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9095
9096         return addr;
9097 }
9098
9099 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9100 {
9101         int ret;
9102
9103         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9104                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9105                 return -EINVAL;
9106         }
9107
9108         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9109                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9110
9111         offset = tg3_nvram_phys_addr(tp, offset);
9112
9113         if (offset > NVRAM_ADDR_MSK)
9114                 return -EINVAL;
9115
9116         ret = tg3_nvram_lock(tp);
9117         if (ret)
9118                 return ret;
9119
9120         tg3_enable_nvram_access(tp);
9121
9122         tw32(NVRAM_ADDR, offset);
9123         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9124                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9125
9126         if (ret == 0)
9127                 *val = swab32(tr32(NVRAM_RDDATA));
9128
9129         tg3_disable_nvram_access(tp);
9130
9131         tg3_nvram_unlock(tp);
9132
9133         return ret;
9134 }
9135
9136 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9137 {
9138         int err;
9139         u32 tmp;
9140
9141         err = tg3_nvram_read(tp, offset, &tmp);
9142         *val = swab32(tmp);
9143         return err;
9144 }
9145
9146 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9147                                     u32 offset, u32 len, u8 *buf)
9148 {
9149         int i, j, rc = 0;
9150         u32 val;
9151
9152         for (i = 0; i < len; i += 4) {
9153                 u32 addr, data;
9154
9155                 addr = offset + i;
9156
9157                 memcpy(&data, buf + i, 4);
9158
9159                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9160
9161                 val = tr32(GRC_EEPROM_ADDR);
9162                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9163
9164                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9165                         EEPROM_ADDR_READ);
9166                 tw32(GRC_EEPROM_ADDR, val |
9167                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9168                         (addr & EEPROM_ADDR_ADDR_MASK) |
9169                         EEPROM_ADDR_START |
9170                         EEPROM_ADDR_WRITE);
9171                 
9172                 for (j = 0; j < 10000; j++) {
9173                         val = tr32(GRC_EEPROM_ADDR);
9174
9175                         if (val & EEPROM_ADDR_COMPLETE)
9176                                 break;
9177                         udelay(100);
9178                 }
9179                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9180                         rc = -EBUSY;
9181                         break;
9182                 }
9183         }
9184
9185         return rc;
9186 }
9187
9188 /* offset and length are dword aligned */
9189 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9190                 u8 *buf)
9191 {
9192         int ret = 0;
9193         u32 pagesize = tp->nvram_pagesize;
9194         u32 pagemask = pagesize - 1;
9195         u32 nvram_cmd;
9196         u8 *tmp;
9197
9198         tmp = kmalloc(pagesize, GFP_KERNEL);
9199         if (tmp == NULL)
9200                 return -ENOMEM;
9201
9202         while (len) {
9203                 int j;
9204                 u32 phy_addr, page_off, size;
9205
9206                 phy_addr = offset & ~pagemask;
9207         
9208                 for (j = 0; j < pagesize; j += 4) {
9209                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9210                                                 (u32 *) (tmp + j))))
9211                                 break;
9212                 }
9213                 if (ret)
9214                         break;
9215
9216                 page_off = offset & pagemask;
9217                 size = pagesize;
9218                 if (len < size)
9219                         size = len;
9220
9221                 len -= size;
9222
9223                 memcpy(tmp + page_off, buf, size);
9224
9225                 offset = offset + (pagesize - page_off);
9226
9227                 tg3_enable_nvram_access(tp);
9228
9229                 /*
9230                  * Before we can erase the flash page, we need
9231                  * to issue a special "write enable" command.
9232                  */
9233                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9234
9235                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9236                         break;
9237
9238                 /* Erase the target page */
9239                 tw32(NVRAM_ADDR, phy_addr);
9240
9241                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9242                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9243
9244                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9245                         break;
9246
9247                 /* Issue another write enable to start the write. */
9248                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9249
9250                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9251                         break;
9252
9253                 for (j = 0; j < pagesize; j += 4) {
9254                         u32 data;
9255
9256                         data = *((u32 *) (tmp + j));
9257                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9258
9259                         tw32(NVRAM_ADDR, phy_addr + j);
9260
9261                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9262                                 NVRAM_CMD_WR;
9263
9264                         if (j == 0)
9265                                 nvram_cmd |= NVRAM_CMD_FIRST;
9266                         else if (j == (pagesize - 4))
9267                                 nvram_cmd |= NVRAM_CMD_LAST;
9268
9269                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9270                                 break;
9271                 }
9272                 if (ret)
9273                         break;
9274         }
9275
9276         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9277         tg3_nvram_exec_cmd(tp, nvram_cmd);
9278
9279         kfree(tmp);
9280
9281         return ret;
9282 }
9283
9284 /* offset and length are dword aligned */
9285 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9286                 u8 *buf)
9287 {
9288         int i, ret = 0;
9289
9290         for (i = 0; i < len; i += 4, offset += 4) {
9291                 u32 data, page_off, phy_addr, nvram_cmd;
9292
9293                 memcpy(&data, buf + i, 4);
9294                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9295
9296                 page_off = offset % tp->nvram_pagesize;
9297
9298                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9299
9300                 tw32(NVRAM_ADDR, phy_addr);
9301
9302                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9303
9304                 if ((page_off == 0) || (i == 0))
9305                         nvram_cmd |= NVRAM_CMD_FIRST;
9306                 else if (page_off == (tp->nvram_pagesize - 4))
9307                         nvram_cmd |= NVRAM_CMD_LAST;
9308
9309                 if (i == (len - 4))
9310                         nvram_cmd |= NVRAM_CMD_LAST;
9311
9312                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9313                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9314                     (tp->nvram_jedecnum == JEDEC_ST) &&
9315                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9316
9317                         if ((ret = tg3_nvram_exec_cmd(tp,
9318                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9319                                 NVRAM_CMD_DONE)))
9320
9321                                 break;
9322                 }
9323                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9324                         /* We always do complete word writes to eeprom. */
9325                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9326                 }
9327
9328                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9329                         break;
9330         }
9331         return ret;
9332 }
9333
9334 /* offset and length are dword aligned */
9335 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9336 {
9337         int ret;
9338
9339         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9340                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9341                 return -EINVAL;
9342         }
9343
9344         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9345                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9346                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9347                 udelay(40);
9348         }
9349
9350         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9351                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9352         }
9353         else {
9354                 u32 grc_mode;
9355
9356                 ret = tg3_nvram_lock(tp);
9357                 if (ret)
9358                         return ret;
9359
9360                 tg3_enable_nvram_access(tp);
9361                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9362                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9363                         tw32(NVRAM_WRITE1, 0x406);
9364
9365                 grc_mode = tr32(GRC_MODE);
9366                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9367
9368                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9369                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9370
9371                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9372                                 buf);
9373                 }
9374                 else {
9375                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9376                                 buf);
9377                 }
9378
9379                 grc_mode = tr32(GRC_MODE);
9380                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9381
9382                 tg3_disable_nvram_access(tp);
9383                 tg3_nvram_unlock(tp);
9384         }
9385
9386         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9387                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9388                 udelay(40);
9389         }
9390
9391         return ret;
9392 }
9393
9394 struct subsys_tbl_ent {
9395         u16 subsys_vendor, subsys_devid;
9396         u32 phy_id;
9397 };
9398
9399 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9400         /* Broadcom boards. */
9401         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9402         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9403         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9404         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9405         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9406         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9407         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9408         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9409         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9410         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9411         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9412
9413         /* 3com boards. */
9414         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9415         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9416         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9417         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9418         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9419
9420         /* DELL boards. */
9421         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9422         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9423         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9424         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9425
9426         /* Compaq boards. */
9427         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9428         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9429         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9430         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9431         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9432
9433         /* IBM boards. */
9434         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9435 };
9436
9437 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9438 {
9439         int i;
9440
9441         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9442                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9443                      tp->pdev->subsystem_vendor) &&
9444                     (subsys_id_to_phy_id[i].subsys_devid ==
9445                      tp->pdev->subsystem_device))
9446                         return &subsys_id_to_phy_id[i];
9447         }
9448         return NULL;
9449 }
9450
9451 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9452 {
9453         u32 val;
9454         u16 pmcsr;
9455
9456         /* On some early chips the SRAM cannot be accessed in D3hot state,
9457          * so need make sure we're in D0.
9458          */
9459         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9460         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9461         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9462         msleep(1);
9463
9464         /* Make sure register accesses (indirect or otherwise)
9465          * will function correctly.
9466          */
9467         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9468                                tp->misc_host_ctrl);
9469
9470         tp->phy_id = PHY_ID_INVALID;
9471         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9472
9473         /* Do not even try poking around in here on Sun parts.  */
9474         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9475                 return;
9476
9477         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9478         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9479                 u32 nic_cfg, led_cfg;
9480                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9481                 int eeprom_phy_serdes = 0;
9482
9483                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9484                 tp->nic_sram_data_cfg = nic_cfg;
9485
9486                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9487                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9488                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9489                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9490                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9491                     (ver > 0) && (ver < 0x100))
9492                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9493
9494                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9495                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9496                         eeprom_phy_serdes = 1;
9497
9498                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9499                 if (nic_phy_id != 0) {
9500                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9501                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9502
9503                         eeprom_phy_id  = (id1 >> 16) << 10;
9504                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9505                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9506                 } else
9507                         eeprom_phy_id = 0;
9508
9509                 tp->phy_id = eeprom_phy_id;
9510                 if (eeprom_phy_serdes) {
9511                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9512                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9513                         else
9514                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9515                 }
9516
9517                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9518                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9519                                     SHASTA_EXT_LED_MODE_MASK);
9520                 else
9521                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9522
9523                 switch (led_cfg) {
9524                 default:
9525                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9526                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9527                         break;
9528
9529                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9530                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9531                         break;
9532
9533                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9534                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9535
9536                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9537                          * read on some older 5700/5701 bootcode.
9538                          */
9539                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9540                             ASIC_REV_5700 ||
9541                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9542                             ASIC_REV_5701)
9543                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9544
9545                         break;
9546
9547                 case SHASTA_EXT_LED_SHARED:
9548                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9549                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9550                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9551                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9552                                                  LED_CTRL_MODE_PHY_2);
9553                         break;
9554
9555                 case SHASTA_EXT_LED_MAC:
9556                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9557                         break;
9558
9559                 case SHASTA_EXT_LED_COMBO:
9560                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9561                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9562                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9563                                                  LED_CTRL_MODE_PHY_2);
9564                         break;
9565
9566                 };
9567
9568                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9569                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9570                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9571                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9572
9573                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9574                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9575                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9576                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9577
9578                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9579                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9580                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9581                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9582                 }
9583                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9584                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9585
9586                 if (cfg2 & (1 << 17))
9587                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9588
9589                 /* serdes signal pre-emphasis in register 0x590 set by */
9590                 /* bootcode if bit 18 is set */
9591                 if (cfg2 & (1 << 18))
9592                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9593         }
9594 }
9595
9596 static int __devinit tg3_phy_probe(struct tg3 *tp)
9597 {
9598         u32 hw_phy_id_1, hw_phy_id_2;
9599         u32 hw_phy_id, hw_phy_id_masked;
9600         int err;
9601
9602         /* Reading the PHY ID register can conflict with ASF
9603          * firwmare access to the PHY hardware.
9604          */
9605         err = 0;
9606         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9607                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9608         } else {
9609                 /* Now read the physical PHY_ID from the chip and verify
9610                  * that it is sane.  If it doesn't look good, we fall back
9611                  * to either the hard-coded table based PHY_ID and failing
9612                  * that the value found in the eeprom area.
9613                  */
9614                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9615                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9616
9617                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9618                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9619                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9620
9621                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9622         }
9623
9624         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9625                 tp->phy_id = hw_phy_id;
9626                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9627                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9628                 else
9629                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9630         } else {
9631                 if (tp->phy_id != PHY_ID_INVALID) {
9632                         /* Do nothing, phy ID already set up in
9633                          * tg3_get_eeprom_hw_cfg().
9634                          */
9635                 } else {
9636                         struct subsys_tbl_ent *p;
9637
9638                         /* No eeprom signature?  Try the hardcoded
9639                          * subsys device table.
9640                          */
9641                         p = lookup_by_subsys(tp);
9642                         if (!p)
9643                                 return -ENODEV;
9644
9645                         tp->phy_id = p->phy_id;
9646                         if (!tp->phy_id ||
9647                             tp->phy_id == PHY_ID_BCM8002)
9648                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9649                 }
9650         }
9651
9652         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9653             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9654                 u32 bmsr, adv_reg, tg3_ctrl;
9655
9656                 tg3_readphy(tp, MII_BMSR, &bmsr);
9657                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9658                     (bmsr & BMSR_LSTATUS))
9659                         goto skip_phy_reset;
9660                     
9661                 err = tg3_phy_reset(tp);
9662                 if (err)
9663                         return err;
9664
9665                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9666                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9667                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9668                 tg3_ctrl = 0;
9669                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9670                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9671                                     MII_TG3_CTRL_ADV_1000_FULL);
9672                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9673                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9674                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9675                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9676                 }
9677
9678                 if (!tg3_copper_is_advertising_all(tp)) {
9679                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9680
9681                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9682                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9683
9684                         tg3_writephy(tp, MII_BMCR,
9685                                      BMCR_ANENABLE | BMCR_ANRESTART);
9686                 }
9687                 tg3_phy_set_wirespeed(tp);
9688
9689                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9690                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9691                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9692         }
9693
9694 skip_phy_reset:
9695         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9696                 err = tg3_init_5401phy_dsp(tp);
9697                 if (err)
9698                         return err;
9699         }
9700
9701         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9702                 err = tg3_init_5401phy_dsp(tp);
9703         }
9704
9705         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9706                 tp->link_config.advertising =
9707                         (ADVERTISED_1000baseT_Half |
9708                          ADVERTISED_1000baseT_Full |
9709                          ADVERTISED_Autoneg |
9710                          ADVERTISED_FIBRE);
9711         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9712                 tp->link_config.advertising &=
9713                         ~(ADVERTISED_1000baseT_Half |
9714                           ADVERTISED_1000baseT_Full);
9715
9716         return err;
9717 }
9718
9719 static void __devinit tg3_read_partno(struct tg3 *tp)
9720 {
9721         unsigned char vpd_data[256];
9722         int i;
9723         u32 magic;
9724
9725         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9726                 /* Sun decided not to put the necessary bits in the
9727                  * NVRAM of their onboard tg3 parts :(
9728                  */
9729                 strcpy(tp->board_part_number, "Sun 570X");
9730                 return;
9731         }
9732
9733         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9734                 return;
9735
9736         if (magic == TG3_EEPROM_MAGIC) {
9737                 for (i = 0; i < 256; i += 4) {
9738                         u32 tmp;
9739
9740                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9741                                 goto out_not_found;
9742
9743                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9744                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9745                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9746                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9747                 }
9748         } else {
9749                 int vpd_cap;
9750
9751                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9752                 for (i = 0; i < 256; i += 4) {
9753                         u32 tmp, j = 0;
9754                         u16 tmp16;
9755
9756                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9757                                               i);
9758                         while (j++ < 100) {
9759                                 pci_read_config_word(tp->pdev, vpd_cap +
9760                                                      PCI_VPD_ADDR, &tmp16);
9761                                 if (tmp16 & 0x8000)
9762                                         break;
9763                                 msleep(1);
9764                         }
9765                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9766                                               &tmp);
9767                         tmp = cpu_to_le32(tmp);
9768                         memcpy(&vpd_data[i], &tmp, 4);
9769                 }
9770         }
9771
9772         /* Now parse and find the part number. */
9773         for (i = 0; i < 256; ) {
9774                 unsigned char val = vpd_data[i];
9775                 int block_end;
9776
9777                 if (val == 0x82 || val == 0x91) {
9778                         i = (i + 3 +
9779                              (vpd_data[i + 1] +
9780                               (vpd_data[i + 2] << 8)));
9781                         continue;
9782                 }
9783
9784                 if (val != 0x90)
9785                         goto out_not_found;
9786
9787                 block_end = (i + 3 +
9788                              (vpd_data[i + 1] +
9789                               (vpd_data[i + 2] << 8)));
9790                 i += 3;
9791                 while (i < block_end) {
9792                         if (vpd_data[i + 0] == 'P' &&
9793                             vpd_data[i + 1] == 'N') {
9794                                 int partno_len = vpd_data[i + 2];
9795
9796                                 if (partno_len > 24)
9797                                         goto out_not_found;
9798
9799                                 memcpy(tp->board_part_number,
9800                                        &vpd_data[i + 3],
9801                                        partno_len);
9802
9803                                 /* Success. */
9804                                 return;
9805                         }
9806                 }
9807
9808                 /* Part number not found. */
9809                 goto out_not_found;
9810         }
9811
9812 out_not_found:
9813         strcpy(tp->board_part_number, "none");
9814 }
9815
9816 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9817 {
9818         u32 val, offset, start;
9819
9820         if (tg3_nvram_read_swab(tp, 0, &val))
9821                 return;
9822
9823         if (val != TG3_EEPROM_MAGIC)
9824                 return;
9825
9826         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9827             tg3_nvram_read_swab(tp, 0x4, &start))
9828                 return;
9829
9830         offset = tg3_nvram_logical_addr(tp, offset);
9831         if (tg3_nvram_read_swab(tp, offset, &val))
9832                 return;
9833
9834         if ((val & 0xfc000000) == 0x0c000000) {
9835                 u32 ver_offset, addr;
9836                 int i;
9837
9838                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9839                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9840                         return;
9841
9842                 if (val != 0)
9843                         return;
9844
9845                 addr = offset + ver_offset - start;
9846                 for (i = 0; i < 16; i += 4) {
9847                         if (tg3_nvram_read(tp, addr + i, &val))
9848                                 return;
9849
9850                         val = cpu_to_le32(val);
9851                         memcpy(tp->fw_ver + i, &val, 4);
9852                 }
9853         }
9854 }
9855
9856 #ifdef CONFIG_SPARC64
9857 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9858 {
9859         struct pci_dev *pdev = tp->pdev;
9860         struct pcidev_cookie *pcp = pdev->sysdata;
9861
9862         if (pcp != NULL) {
9863                 int node = pcp->prom_node;
9864                 u32 venid;
9865                 int err;
9866
9867                 err = prom_getproperty(node, "subsystem-vendor-id",
9868                                        (char *) &venid, sizeof(venid));
9869                 if (err == 0 || err == -1)
9870                         return 0;
9871                 if (venid == PCI_VENDOR_ID_SUN)
9872                         return 1;
9873
9874                 /* TG3 chips onboard the SunBlade-2500 don't have the
9875                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9876                  * are distinguishable from non-Sun variants by being
9877                  * named "network" by the firmware.  Non-Sun cards will
9878                  * show up as being named "ethernet".
9879                  */
9880                 if (!strcmp(pcp->prom_name, "network"))
9881                         return 1;
9882         }
9883         return 0;
9884 }
9885 #endif
9886
9887 static int __devinit tg3_get_invariants(struct tg3 *tp)
9888 {
9889         static struct pci_device_id write_reorder_chipsets[] = {
9890                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9891                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9892                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9893                              PCI_DEVICE_ID_VIA_8385_0) },
9894                 { },
9895         };
9896         u32 misc_ctrl_reg;
9897         u32 cacheline_sz_reg;
9898         u32 pci_state_reg, grc_misc_cfg;
9899         u32 val;
9900         u16 pci_cmd;
9901         int err;
9902
9903 #ifdef CONFIG_SPARC64
9904         if (tg3_is_sun_570X(tp))
9905                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9906 #endif
9907
9908         /* Force memory write invalidate off.  If we leave it on,
9909          * then on 5700_BX chips we have to enable a workaround.
9910          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9911          * to match the cacheline size.  The Broadcom driver have this
9912          * workaround but turns MWI off all the times so never uses
9913          * it.  This seems to suggest that the workaround is insufficient.
9914          */
9915         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9916         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9917         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9918
9919         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9920          * has the register indirect write enable bit set before
9921          * we try to access any of the MMIO registers.  It is also
9922          * critical that the PCI-X hw workaround situation is decided
9923          * before that as well.
9924          */
9925         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9926                               &misc_ctrl_reg);
9927
9928         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9929                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9930
9931         /* Wrong chip ID in 5752 A0. This code can be removed later
9932          * as A0 is not in production.
9933          */
9934         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9935                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9936
9937         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9938          * we need to disable memory and use config. cycles
9939          * only to access all registers. The 5702/03 chips
9940          * can mistakenly decode the special cycles from the
9941          * ICH chipsets as memory write cycles, causing corruption
9942          * of register and memory space. Only certain ICH bridges
9943          * will drive special cycles with non-zero data during the
9944          * address phase which can fall within the 5703's address
9945          * range. This is not an ICH bug as the PCI spec allows
9946          * non-zero address during special cycles. However, only
9947          * these ICH bridges are known to drive non-zero addresses
9948          * during special cycles.
9949          *
9950          * Since special cycles do not cross PCI bridges, we only
9951          * enable this workaround if the 5703 is on the secondary
9952          * bus of these ICH bridges.
9953          */
9954         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9955             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9956                 static struct tg3_dev_id {
9957                         u32     vendor;
9958                         u32     device;
9959                         u32     rev;
9960                 } ich_chipsets[] = {
9961                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9962                           PCI_ANY_ID },
9963                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9964                           PCI_ANY_ID },
9965                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9966                           0xa },
9967                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9968                           PCI_ANY_ID },
9969                         { },
9970                 };
9971                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9972                 struct pci_dev *bridge = NULL;
9973
9974                 while (pci_id->vendor != 0) {
9975                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9976                                                 bridge);
9977                         if (!bridge) {
9978                                 pci_id++;
9979                                 continue;
9980                         }
9981                         if (pci_id->rev != PCI_ANY_ID) {
9982                                 u8 rev;
9983
9984                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9985                                                      &rev);
9986                                 if (rev > pci_id->rev)
9987                                         continue;
9988                         }
9989                         if (bridge->subordinate &&
9990                             (bridge->subordinate->number ==
9991                              tp->pdev->bus->number)) {
9992
9993                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9994                                 pci_dev_put(bridge);
9995                                 break;
9996                         }
9997                 }
9998         }
9999
10000         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10001          * DMA addresses > 40-bit. This bridge may have other additional
10002          * 57xx devices behind it in some 4-port NIC designs for example.
10003          * Any tg3 device found behind the bridge will also need the 40-bit
10004          * DMA workaround.
10005          */
10006         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10007             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10008                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10009                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10010                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10011         }
10012         else {
10013                 struct pci_dev *bridge = NULL;
10014
10015                 do {
10016                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10017                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10018                                                 bridge);
10019                         if (bridge && bridge->subordinate &&
10020                             (bridge->subordinate->number <=
10021                              tp->pdev->bus->number) &&
10022                             (bridge->subordinate->subordinate >=
10023                              tp->pdev->bus->number)) {
10024                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10025                                 pci_dev_put(bridge);
10026                                 break;
10027                         }
10028                 } while (bridge);
10029         }
10030
10031         /* Initialize misc host control in PCI block. */
10032         tp->misc_host_ctrl |= (misc_ctrl_reg &
10033                                MISC_HOST_CTRL_CHIPREV);
10034         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10035                                tp->misc_host_ctrl);
10036
10037         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10038                               &cacheline_sz_reg);
10039
10040         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10041         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10042         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10043         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10044
10045         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10046             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10047             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10048             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10049                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10050
10051         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10052             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10053                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10054
10055         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10056                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10057                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10058                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10059                 } else
10060                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10061         }
10062
10063         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10064             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10065             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10066             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10067                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10068
10069         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10070                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10071
10072         /* If we have an AMD 762 or VIA K8T800 chipset, write
10073          * reordering to the mailbox registers done by the host
10074          * controller can cause major troubles.  We read back from
10075          * every mailbox register write to force the writes to be
10076          * posted to the chip in order.
10077          */
10078         if (pci_dev_present(write_reorder_chipsets) &&
10079             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10080                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10081
10082         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10083             tp->pci_lat_timer < 64) {
10084                 tp->pci_lat_timer = 64;
10085
10086                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10087                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10088                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10089                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10090
10091                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10092                                        cacheline_sz_reg);
10093         }
10094
10095         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10096                               &pci_state_reg);
10097
10098         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10099                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10100
10101                 /* If this is a 5700 BX chipset, and we are in PCI-X
10102                  * mode, enable register write workaround.
10103                  *
10104                  * The workaround is to use indirect register accesses
10105                  * for all chip writes not to mailbox registers.
10106                  */
10107                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10108                         u32 pm_reg;
10109                         u16 pci_cmd;
10110
10111                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10112
10113                         /* The chip can have it's power management PCI config
10114                          * space registers clobbered due to this bug.
10115                          * So explicitly force the chip into D0 here.
10116                          */
10117                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10118                                               &pm_reg);
10119                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10120                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10121                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10122                                                pm_reg);
10123
10124                         /* Also, force SERR#/PERR# in PCI command. */
10125                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10126                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10127                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10128                 }
10129         }
10130
10131         /* 5700 BX chips need to have their TX producer index mailboxes
10132          * written twice to workaround a bug.
10133          */
10134         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10135                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10136
10137         /* Back to back register writes can cause problems on this chip,
10138          * the workaround is to read back all reg writes except those to
10139          * mailbox regs.  See tg3_write_indirect_reg32().
10140          *
10141          * PCI Express 5750_A0 rev chips need this workaround too.
10142          */
10143         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10144             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10145              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10146                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10147
10148         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10149                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10150         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10151                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10152
10153         /* Chip-specific fixup from Broadcom driver */
10154         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10155             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10156                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10157                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10158         }
10159
10160         /* Default fast path register access methods */
10161         tp->read32 = tg3_read32;
10162         tp->write32 = tg3_write32;
10163         tp->read32_mbox = tg3_read32;
10164         tp->write32_mbox = tg3_write32;
10165         tp->write32_tx_mbox = tg3_write32;
10166         tp->write32_rx_mbox = tg3_write32;
10167
10168         /* Various workaround register access methods */
10169         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10170                 tp->write32 = tg3_write_indirect_reg32;
10171         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10172                 tp->write32 = tg3_write_flush_reg32;
10173
10174         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10175             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10176                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10177                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10178                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10179         }
10180
10181         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10182                 tp->read32 = tg3_read_indirect_reg32;
10183                 tp->write32 = tg3_write_indirect_reg32;
10184                 tp->read32_mbox = tg3_read_indirect_mbox;
10185                 tp->write32_mbox = tg3_write_indirect_mbox;
10186                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10187                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10188
10189                 iounmap(tp->regs);
10190                 tp->regs = NULL;
10191
10192                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10193                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10194                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10195         }
10196
10197         /* Get eeprom hw config before calling tg3_set_power_state().
10198          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10199          * determined before calling tg3_set_power_state() so that
10200          * we know whether or not to switch out of Vaux power.
10201          * When the flag is set, it means that GPIO1 is used for eeprom
10202          * write protect and also implies that it is a LOM where GPIOs
10203          * are not used to switch power.
10204          */ 
10205         tg3_get_eeprom_hw_cfg(tp);
10206
10207         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10208          * GPIO1 driven high will bring 5700's external PHY out of reset.
10209          * It is also used as eeprom write protect on LOMs.
10210          */
10211         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10212         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10213             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10214                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10215                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10216         /* Unused GPIO3 must be driven as output on 5752 because there
10217          * are no pull-up resistors on unused GPIO pins.
10218          */
10219         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10220                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10221
10222         /* Force the chip into D0. */
10223         err = tg3_set_power_state(tp, PCI_D0);
10224         if (err) {
10225                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10226                        pci_name(tp->pdev));
10227                 return err;
10228         }
10229
10230         /* 5700 B0 chips do not support checksumming correctly due
10231          * to hardware bugs.
10232          */
10233         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10234                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10235
10236         /* Pseudo-header checksum is done by hardware logic and not
10237          * the offload processers, so make the chip do the pseudo-
10238          * header checksums on receive.  For transmit it is more
10239          * convenient to do the pseudo-header checksum in software
10240          * as Linux does that on transmit for us in all cases.
10241          */
10242         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
10243         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
10244
10245         /* Derive initial jumbo mode from MTU assigned in
10246          * ether_setup() via the alloc_etherdev() call
10247          */
10248         if (tp->dev->mtu > ETH_DATA_LEN &&
10249             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10250                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10251
10252         /* Determine WakeOnLan speed to use. */
10253         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10254             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10255             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10256             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10257                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10258         } else {
10259                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10260         }
10261
10262         /* A few boards don't want Ethernet@WireSpeed phy feature */
10263         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10264             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10265              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10266              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10267             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10268                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10269
10270         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10271             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10272                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10273         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10274                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10275
10276         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10277             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10278                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10279
10280         tp->coalesce_mode = 0;
10281         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10282             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10283                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10284
10285         /* Initialize MAC MI mode, polling disabled. */
10286         tw32_f(MAC_MI_MODE, tp->mi_mode);
10287         udelay(80);
10288
10289         /* Initialize data/descriptor byte/word swapping. */
10290         val = tr32(GRC_MODE);
10291         val &= GRC_MODE_HOST_STACKUP;
10292         tw32(GRC_MODE, val | tp->grc_mode);
10293
10294         tg3_switch_clocks(tp);
10295
10296         /* Clear this out for sanity. */
10297         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10298
10299         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10300                               &pci_state_reg);
10301         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10302             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10303                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10304
10305                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10306                     chiprevid == CHIPREV_ID_5701_B0 ||
10307                     chiprevid == CHIPREV_ID_5701_B2 ||
10308                     chiprevid == CHIPREV_ID_5701_B5) {
10309                         void __iomem *sram_base;
10310
10311                         /* Write some dummy words into the SRAM status block
10312                          * area, see if it reads back correctly.  If the return
10313                          * value is bad, force enable the PCIX workaround.
10314                          */
10315                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10316
10317                         writel(0x00000000, sram_base);
10318                         writel(0x00000000, sram_base + 4);
10319                         writel(0xffffffff, sram_base + 4);
10320                         if (readl(sram_base) != 0x00000000)
10321                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10322                 }
10323         }
10324
10325         udelay(50);
10326         tg3_nvram_init(tp);
10327
10328         grc_misc_cfg = tr32(GRC_MISC_CFG);
10329         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10330
10331         /* Broadcom's driver says that CIOBE multisplit has a bug */
10332 #if 0
10333         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10334             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10335                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10336                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10337         }
10338 #endif
10339         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10340             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10341              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10342                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10343
10344         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10345             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10346                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10347         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10348                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10349                                       HOSTCC_MODE_CLRTICK_TXBD);
10350
10351                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10352                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10353                                        tp->misc_host_ctrl);
10354         }
10355
10356         /* these are limited to 10/100 only */
10357         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10358              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10359             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10360              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10361              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10362               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10363               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10364             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10365              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10366               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10367                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10368
10369         err = tg3_phy_probe(tp);
10370         if (err) {
10371                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10372                        pci_name(tp->pdev), err);
10373                 /* ... but do not return immediately ... */
10374         }
10375
10376         tg3_read_partno(tp);
10377         tg3_read_fw_ver(tp);
10378
10379         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10380                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10381         } else {
10382                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10383                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10384                 else
10385                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10386         }
10387
10388         /* 5700 {AX,BX} chips have a broken status block link
10389          * change bit implementation, so we must use the
10390          * status register in those cases.
10391          */
10392         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10393                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10394         else
10395                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10396
10397         /* The led_ctrl is set during tg3_phy_probe, here we might
10398          * have to force the link status polling mechanism based
10399          * upon subsystem IDs.
10400          */
10401         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10402             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10403                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10404                                   TG3_FLAG_USE_LINKCHG_REG);
10405         }
10406
10407         /* For all SERDES we poll the MAC status register. */
10408         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10409                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10410         else
10411                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10412
10413         /* All chips before 5787 can get confused if TX buffers
10414          * straddle the 4GB address boundary in some cases.
10415          */
10416         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10417                 tp->dev->hard_start_xmit = tg3_start_xmit;
10418         else
10419                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10420
10421         tp->rx_offset = 2;
10422         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10423             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10424                 tp->rx_offset = 0;
10425
10426         /* By default, disable wake-on-lan.  User can change this
10427          * using ETHTOOL_SWOL.
10428          */
10429         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10430
10431         return err;
10432 }
10433
10434 #ifdef CONFIG_SPARC64
10435 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10436 {
10437         struct net_device *dev = tp->dev;
10438         struct pci_dev *pdev = tp->pdev;
10439         struct pcidev_cookie *pcp = pdev->sysdata;
10440
10441         if (pcp != NULL) {
10442                 int node = pcp->prom_node;
10443
10444                 if (prom_getproplen(node, "local-mac-address") == 6) {
10445                         prom_getproperty(node, "local-mac-address",
10446                                          dev->dev_addr, 6);
10447                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10448                         return 0;
10449                 }
10450         }
10451         return -ENODEV;
10452 }
10453
10454 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10455 {
10456         struct net_device *dev = tp->dev;
10457
10458         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10459         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10460         return 0;
10461 }
10462 #endif
10463
10464 static int __devinit tg3_get_device_address(struct tg3 *tp)
10465 {
10466         struct net_device *dev = tp->dev;
10467         u32 hi, lo, mac_offset;
10468
10469 #ifdef CONFIG_SPARC64
10470         if (!tg3_get_macaddr_sparc(tp))
10471                 return 0;
10472 #endif
10473
10474         mac_offset = 0x7c;
10475         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10476              !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
10477             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10478                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10479                         mac_offset = 0xcc;
10480                 if (tg3_nvram_lock(tp))
10481                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10482                 else
10483                         tg3_nvram_unlock(tp);
10484         }
10485
10486         /* First try to get it from MAC address mailbox. */
10487         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10488         if ((hi >> 16) == 0x484b) {
10489                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10490                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10491
10492                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10493                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10494                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10495                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10496                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10497         }
10498         /* Next, try NVRAM. */
10499         else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
10500                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10501                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10502                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10503                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10504                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
10505                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
10506                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10507                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10508         }
10509         /* Finally just fetch it out of the MAC control regs. */
10510         else {
10511                 hi = tr32(MAC_ADDR_0_HIGH);
10512                 lo = tr32(MAC_ADDR_0_LOW);
10513
10514                 dev->dev_addr[5] = lo & 0xff;
10515                 dev->dev_addr[4] = (lo >> 8) & 0xff;
10516                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10517                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10518                 dev->dev_addr[1] = hi & 0xff;
10519                 dev->dev_addr[0] = (hi >> 8) & 0xff;
10520         }
10521
10522         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10523 #ifdef CONFIG_SPARC64
10524                 if (!tg3_get_default_macaddr_sparc(tp))
10525                         return 0;
10526 #endif
10527                 return -EINVAL;
10528         }
10529         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10530         return 0;
10531 }
10532
10533 #define BOUNDARY_SINGLE_CACHELINE       1
10534 #define BOUNDARY_MULTI_CACHELINE        2
10535
10536 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10537 {
10538         int cacheline_size;
10539         u8 byte;
10540         int goal;
10541
10542         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10543         if (byte == 0)
10544                 cacheline_size = 1024;
10545         else
10546                 cacheline_size = (int) byte * 4;
10547
10548         /* On 5703 and later chips, the boundary bits have no
10549          * effect.
10550          */
10551         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10552             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10553             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10554                 goto out;
10555
10556 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10557         goal = BOUNDARY_MULTI_CACHELINE;
10558 #else
10559 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10560         goal = BOUNDARY_SINGLE_CACHELINE;
10561 #else
10562         goal = 0;
10563 #endif
10564 #endif
10565
10566         if (!goal)
10567                 goto out;
10568
10569         /* PCI controllers on most RISC systems tend to disconnect
10570          * when a device tries to burst across a cache-line boundary.
10571          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10572          *
10573          * Unfortunately, for PCI-E there are only limited
10574          * write-side controls for this, and thus for reads
10575          * we will still get the disconnects.  We'll also waste
10576          * these PCI cycles for both read and write for chips
10577          * other than 5700 and 5701 which do not implement the
10578          * boundary bits.
10579          */
10580         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10581             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10582                 switch (cacheline_size) {
10583                 case 16:
10584                 case 32:
10585                 case 64:
10586                 case 128:
10587                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10588                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10589                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10590                         } else {
10591                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10592                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10593                         }
10594                         break;
10595
10596                 case 256:
10597                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10598                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10599                         break;
10600
10601                 default:
10602                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10603                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10604                         break;
10605                 };
10606         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10607                 switch (cacheline_size) {
10608                 case 16:
10609                 case 32:
10610                 case 64:
10611                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10612                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10613                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10614                                 break;
10615                         }
10616                         /* fallthrough */
10617                 case 128:
10618                 default:
10619                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10620                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10621                         break;
10622                 };
10623         } else {
10624                 switch (cacheline_size) {
10625                 case 16:
10626                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10627                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10628                                         DMA_RWCTRL_WRITE_BNDRY_16);
10629                                 break;
10630                         }
10631                         /* fallthrough */
10632                 case 32:
10633                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10634                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10635                                         DMA_RWCTRL_WRITE_BNDRY_32);
10636                                 break;
10637                         }
10638                         /* fallthrough */
10639                 case 64:
10640                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10641                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10642                                         DMA_RWCTRL_WRITE_BNDRY_64);
10643                                 break;
10644                         }
10645                         /* fallthrough */
10646                 case 128:
10647                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10648                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10649                                         DMA_RWCTRL_WRITE_BNDRY_128);
10650                                 break;
10651                         }
10652                         /* fallthrough */
10653                 case 256:
10654                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10655                                 DMA_RWCTRL_WRITE_BNDRY_256);
10656                         break;
10657                 case 512:
10658                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10659                                 DMA_RWCTRL_WRITE_BNDRY_512);
10660                         break;
10661                 case 1024:
10662                 default:
10663                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10664                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10665                         break;
10666                 };
10667         }
10668
10669 out:
10670         return val;
10671 }
10672
10673 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10674 {
10675         struct tg3_internal_buffer_desc test_desc;
10676         u32 sram_dma_descs;
10677         int i, ret;
10678
10679         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10680
10681         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10682         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10683         tw32(RDMAC_STATUS, 0);
10684         tw32(WDMAC_STATUS, 0);
10685
10686         tw32(BUFMGR_MODE, 0);
10687         tw32(FTQ_RESET, 0);
10688
10689         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10690         test_desc.addr_lo = buf_dma & 0xffffffff;
10691         test_desc.nic_mbuf = 0x00002100;
10692         test_desc.len = size;
10693
10694         /*
10695          * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
10696          * the *second* time the tg3 driver was getting loaded after an
10697          * initial scan.
10698          *
10699          * Broadcom tells me:
10700          *   ...the DMA engine is connected to the GRC block and a DMA
10701          *   reset may affect the GRC block in some unpredictable way...
10702          *   The behavior of resets to individual blocks has not been tested.
10703          *
10704          * Broadcom noted the GRC reset will also reset all sub-components.
10705          */
10706         if (to_device) {
10707                 test_desc.cqid_sqid = (13 << 8) | 2;
10708
10709                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10710                 udelay(40);
10711         } else {
10712                 test_desc.cqid_sqid = (16 << 8) | 7;
10713
10714                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10715                 udelay(40);
10716         }
10717         test_desc.flags = 0x00000005;
10718
10719         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10720                 u32 val;
10721
10722                 val = *(((u32 *)&test_desc) + i);
10723                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10724                                        sram_dma_descs + (i * sizeof(u32)));
10725                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10726         }
10727         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10728
10729         if (to_device) {
10730                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10731         } else {
10732                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10733         }
10734
10735         ret = -ENODEV;
10736         for (i = 0; i < 40; i++) {
10737                 u32 val;
10738
10739                 if (to_device)
10740                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10741                 else
10742                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10743                 if ((val & 0xffff) == sram_dma_descs) {
10744                         ret = 0;
10745                         break;
10746                 }
10747
10748                 udelay(100);
10749         }
10750
10751         return ret;
10752 }
10753
10754 #define TEST_BUFFER_SIZE        0x2000
10755
10756 static int __devinit tg3_test_dma(struct tg3 *tp)
10757 {
10758         dma_addr_t buf_dma;
10759         u32 *buf, saved_dma_rwctrl;
10760         int ret;
10761
10762         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10763         if (!buf) {
10764                 ret = -ENOMEM;
10765                 goto out_nofree;
10766         }
10767
10768         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10769                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10770
10771         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10772
10773         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10774                 /* DMA read watermark not used on PCIE */
10775                 tp->dma_rwctrl |= 0x00180000;
10776         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10777                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10778                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10779                         tp->dma_rwctrl |= 0x003f0000;
10780                 else
10781                         tp->dma_rwctrl |= 0x003f000f;
10782         } else {
10783                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10784                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10785                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10786
10787                         /* If the 5704 is behind the EPB bridge, we can
10788                          * do the less restrictive ONE_DMA workaround for
10789                          * better performance.
10790                          */
10791                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10792                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10793                                 tp->dma_rwctrl |= 0x8000;
10794                         else if (ccval == 0x6 || ccval == 0x7)
10795                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10796
10797                         /* Set bit 23 to enable PCIX hw bug fix */
10798                         tp->dma_rwctrl |= 0x009f0000;
10799                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10800                         /* 5780 always in PCIX mode */
10801                         tp->dma_rwctrl |= 0x00144000;
10802                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10803                         /* 5714 always in PCIX mode */
10804                         tp->dma_rwctrl |= 0x00148000;
10805                 } else {
10806                         tp->dma_rwctrl |= 0x001b000f;
10807                 }
10808         }
10809
10810         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10811             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10812                 tp->dma_rwctrl &= 0xfffffff0;
10813
10814         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10815             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10816                 /* Remove this if it causes problems for some boards. */
10817                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10818
10819                 /* On 5700/5701 chips, we need to set this bit.
10820                  * Otherwise the chip will issue cacheline transactions
10821                  * to streamable DMA memory with not all the byte
10822                  * enables turned on.  This is an error on several
10823                  * RISC PCI controllers, in particular sparc64.
10824                  *
10825                  * On 5703/5704 chips, this bit has been reassigned
10826                  * a different meaning.  In particular, it is used
10827                  * on those chips to enable a PCI-X workaround.
10828                  */
10829                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10830         }
10831
10832         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10833
10834 #if 0
10835         /* Unneeded, already done by tg3_get_invariants.  */
10836         tg3_switch_clocks(tp);
10837 #endif
10838
10839         ret = 0;
10840         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10841             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10842                 goto out;
10843
10844         /* It is best to perform DMA test with maximum write burst size
10845          * to expose the 5700/5701 write DMA bug.
10846          */
10847         saved_dma_rwctrl = tp->dma_rwctrl;
10848         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10849         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10850
10851         while (1) {
10852                 u32 *p = buf, i;
10853
10854                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10855                         p[i] = i;
10856
10857                 /* Send the buffer to the chip. */
10858                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10859                 if (ret) {
10860                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10861                         break;
10862                 }
10863
10864 #if 0
10865                 /* validate data reached card RAM correctly. */
10866                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10867                         u32 val;
10868                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10869                         if (le32_to_cpu(val) != p[i]) {
10870                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10871                                 /* ret = -ENODEV here? */
10872                         }
10873                         p[i] = 0;
10874                 }
10875 #endif
10876                 /* Now read it back. */
10877                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10878                 if (ret) {
10879                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10880
10881                         break;
10882                 }
10883
10884                 /* Verify it. */
10885                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10886                         if (p[i] == i)
10887                                 continue;
10888
10889                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10890                             DMA_RWCTRL_WRITE_BNDRY_16) {
10891                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10892                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10893                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10894                                 break;
10895                         } else {
10896                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10897                                 ret = -ENODEV;
10898                                 goto out;
10899                         }
10900                 }
10901
10902                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10903                         /* Success. */
10904                         ret = 0;
10905                         break;
10906                 }
10907         }
10908         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10909             DMA_RWCTRL_WRITE_BNDRY_16) {
10910                 static struct pci_device_id dma_wait_state_chipsets[] = {
10911                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10912                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10913                         { },
10914                 };
10915
10916                 /* DMA test passed without adjusting DMA boundary,
10917                  * now look for chipsets that are known to expose the
10918                  * DMA bug without failing the test.
10919                  */
10920                 if (pci_dev_present(dma_wait_state_chipsets)) {
10921                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10922                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10923                 }
10924                 else
10925                         /* Safe to use the calculated DMA boundary. */
10926                         tp->dma_rwctrl = saved_dma_rwctrl;
10927
10928                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10929         }
10930
10931 out:
10932         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10933 out_nofree:
10934         return ret;
10935 }
10936
10937 static void __devinit tg3_init_link_config(struct tg3 *tp)
10938 {
10939         tp->link_config.advertising =
10940                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10941                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10942                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10943                  ADVERTISED_Autoneg | ADVERTISED_MII);
10944         tp->link_config.speed = SPEED_INVALID;
10945         tp->link_config.duplex = DUPLEX_INVALID;
10946         tp->link_config.autoneg = AUTONEG_ENABLE;
10947         tp->link_config.active_speed = SPEED_INVALID;
10948         tp->link_config.active_duplex = DUPLEX_INVALID;
10949         tp->link_config.phy_is_low_power = 0;
10950         tp->link_config.orig_speed = SPEED_INVALID;
10951         tp->link_config.orig_duplex = DUPLEX_INVALID;
10952         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10953 }
10954
10955 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10956 {
10957         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10958                 tp->bufmgr_config.mbuf_read_dma_low_water =
10959                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10960                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10961                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10962                 tp->bufmgr_config.mbuf_high_water =
10963                         DEFAULT_MB_HIGH_WATER_5705;
10964
10965                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10966                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10967                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10968                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10969                 tp->bufmgr_config.mbuf_high_water_jumbo =
10970                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10971         } else {
10972                 tp->bufmgr_config.mbuf_read_dma_low_water =
10973                         DEFAULT_MB_RDMA_LOW_WATER;
10974                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10975                         DEFAULT_MB_MACRX_LOW_WATER;
10976                 tp->bufmgr_config.mbuf_high_water =
10977                         DEFAULT_MB_HIGH_WATER;
10978
10979                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10980                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10981                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10982                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10983                 tp->bufmgr_config.mbuf_high_water_jumbo =
10984                         DEFAULT_MB_HIGH_WATER_JUMBO;
10985         }
10986
10987         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10988         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10989 }
10990
10991 static char * __devinit tg3_phy_string(struct tg3 *tp)
10992 {
10993         switch (tp->phy_id & PHY_ID_MASK) {
10994         case PHY_ID_BCM5400:    return "5400";
10995         case PHY_ID_BCM5401:    return "5401";
10996         case PHY_ID_BCM5411:    return "5411";
10997         case PHY_ID_BCM5701:    return "5701";
10998         case PHY_ID_BCM5703:    return "5703";
10999         case PHY_ID_BCM5704:    return "5704";
11000         case PHY_ID_BCM5705:    return "5705";
11001         case PHY_ID_BCM5750:    return "5750";
11002         case PHY_ID_BCM5752:    return "5752";
11003         case PHY_ID_BCM5714:    return "5714";
11004         case PHY_ID_BCM5780:    return "5780";
11005         case PHY_ID_BCM5787:    return "5787";
11006         case PHY_ID_BCM8002:    return "8002/serdes";
11007         case 0:                 return "serdes";
11008         default:                return "unknown";
11009         };
11010 }
11011
11012 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11013 {
11014         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11015                 strcpy(str, "PCI Express");
11016                 return str;
11017         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11018                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11019
11020                 strcpy(str, "PCIX:");
11021
11022                 if ((clock_ctrl == 7) ||
11023                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11024                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11025                         strcat(str, "133MHz");
11026                 else if (clock_ctrl == 0)
11027                         strcat(str, "33MHz");
11028                 else if (clock_ctrl == 2)
11029                         strcat(str, "50MHz");
11030                 else if (clock_ctrl == 4)
11031                         strcat(str, "66MHz");
11032                 else if (clock_ctrl == 6)
11033                         strcat(str, "100MHz");
11034         } else {
11035                 strcpy(str, "PCI:");
11036                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11037                         strcat(str, "66MHz");
11038                 else
11039                         strcat(str, "33MHz");
11040         }
11041         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11042                 strcat(str, ":32-bit");
11043         else
11044                 strcat(str, ":64-bit");
11045         return str;
11046 }
11047
11048 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11049 {
11050         struct pci_dev *peer;
11051         unsigned int func, devnr = tp->pdev->devfn & ~7;
11052
11053         for (func = 0; func < 8; func++) {
11054                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11055                 if (peer && peer != tp->pdev)
11056                         break;
11057                 pci_dev_put(peer);
11058         }
11059         /* 5704 can be configured in single-port mode, set peer to
11060          * tp->pdev in that case.
11061          */
11062         if (!peer) {
11063                 peer = tp->pdev;
11064                 return peer;
11065         }
11066
11067         /*
11068          * We don't need to keep the refcount elevated; there's no way
11069          * to remove one half of this device without removing the other
11070          */
11071         pci_dev_put(peer);
11072
11073         return peer;
11074 }
11075
11076 static void __devinit tg3_init_coal(struct tg3 *tp)
11077 {
11078         struct ethtool_coalesce *ec = &tp->coal;
11079
11080         memset(ec, 0, sizeof(*ec));
11081         ec->cmd = ETHTOOL_GCOALESCE;
11082         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11083         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11084         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11085         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11086         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11087         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11088         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11089         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11090         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11091
11092         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11093                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11094                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11095                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11096                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11097                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11098         }
11099
11100         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11101                 ec->rx_coalesce_usecs_irq = 0;
11102                 ec->tx_coalesce_usecs_irq = 0;
11103                 ec->stats_block_coalesce_usecs = 0;
11104         }
11105 }
11106
11107 static int __devinit tg3_init_one(struct pci_dev *pdev,
11108                                   const struct pci_device_id *ent)
11109 {
11110         static int tg3_version_printed = 0;
11111         unsigned long tg3reg_base, tg3reg_len;
11112         struct net_device *dev;
11113         struct tg3 *tp;
11114         int i, err, pm_cap;
11115         char str[40];
11116         u64 dma_mask, persist_dma_mask;
11117
11118         if (tg3_version_printed++ == 0)
11119                 printk(KERN_INFO "%s", version);
11120
11121         err = pci_enable_device(pdev);
11122         if (err) {
11123                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11124                        "aborting.\n");
11125                 return err;
11126         }
11127
11128         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11129                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11130                        "base address, aborting.\n");
11131                 err = -ENODEV;
11132                 goto err_out_disable_pdev;
11133         }
11134
11135         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11136         if (err) {
11137                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11138                        "aborting.\n");
11139                 goto err_out_disable_pdev;
11140         }
11141
11142         pci_set_master(pdev);
11143
11144         /* Find power-management capability. */
11145         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11146         if (pm_cap == 0) {
11147                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11148                        "aborting.\n");
11149                 err = -EIO;
11150                 goto err_out_free_res;
11151         }
11152
11153         tg3reg_base = pci_resource_start(pdev, 0);
11154         tg3reg_len = pci_resource_len(pdev, 0);
11155
11156         dev = alloc_etherdev(sizeof(*tp));
11157         if (!dev) {
11158                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11159                 err = -ENOMEM;
11160                 goto err_out_free_res;
11161         }
11162
11163         SET_MODULE_OWNER(dev);
11164         SET_NETDEV_DEV(dev, &pdev->dev);
11165
11166         dev->features |= NETIF_F_LLTX;
11167 #if TG3_VLAN_TAG_USED
11168         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11169         dev->vlan_rx_register = tg3_vlan_rx_register;
11170         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11171 #endif
11172
11173         tp = netdev_priv(dev);
11174         tp->pdev = pdev;
11175         tp->dev = dev;
11176         tp->pm_cap = pm_cap;
11177         tp->mac_mode = TG3_DEF_MAC_MODE;
11178         tp->rx_mode = TG3_DEF_RX_MODE;
11179         tp->tx_mode = TG3_DEF_TX_MODE;
11180         tp->mi_mode = MAC_MI_MODE_BASE;
11181         if (tg3_debug > 0)
11182                 tp->msg_enable = tg3_debug;
11183         else
11184                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11185
11186         /* The word/byte swap controls here control register access byte
11187          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11188          * setting below.
11189          */
11190         tp->misc_host_ctrl =
11191                 MISC_HOST_CTRL_MASK_PCI_INT |
11192                 MISC_HOST_CTRL_WORD_SWAP |
11193                 MISC_HOST_CTRL_INDIR_ACCESS |
11194                 MISC_HOST_CTRL_PCISTATE_RW;
11195
11196         /* The NONFRM (non-frame) byte/word swap controls take effect
11197          * on descriptor entries, anything which isn't packet data.
11198          *
11199          * The StrongARM chips on the board (one for tx, one for rx)
11200          * are running in big-endian mode.
11201          */
11202         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11203                         GRC_MODE_WSWAP_NONFRM_DATA);
11204 #ifdef __BIG_ENDIAN
11205         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11206 #endif
11207         spin_lock_init(&tp->lock);
11208         spin_lock_init(&tp->tx_lock);
11209         spin_lock_init(&tp->indirect_lock);
11210         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11211
11212         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11213         if (tp->regs == 0UL) {
11214                 printk(KERN_ERR PFX "Cannot map device registers, "
11215                        "aborting.\n");
11216                 err = -ENOMEM;
11217                 goto err_out_free_dev;
11218         }
11219
11220         tg3_init_link_config(tp);
11221
11222         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11223         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11224         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11225
11226         dev->open = tg3_open;
11227         dev->stop = tg3_close;
11228         dev->get_stats = tg3_get_stats;
11229         dev->set_multicast_list = tg3_set_rx_mode;
11230         dev->set_mac_address = tg3_set_mac_addr;
11231         dev->do_ioctl = tg3_ioctl;
11232         dev->tx_timeout = tg3_tx_timeout;
11233         dev->poll = tg3_poll;
11234         dev->ethtool_ops = &tg3_ethtool_ops;
11235         dev->weight = 64;
11236         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11237         dev->change_mtu = tg3_change_mtu;
11238         dev->irq = pdev->irq;
11239 #ifdef CONFIG_NET_POLL_CONTROLLER
11240         dev->poll_controller = tg3_poll_controller;
11241 #endif
11242
11243         err = tg3_get_invariants(tp);
11244         if (err) {
11245                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11246                        "aborting.\n");
11247                 goto err_out_iounmap;
11248         }
11249
11250         /* The EPB bridge inside 5714, 5715, and 5780 and any
11251          * device behind the EPB cannot support DMA addresses > 40-bit.
11252          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11253          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11254          * do DMA address check in tg3_start_xmit().
11255          */
11256         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11257                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11258         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11259                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11260 #ifdef CONFIG_HIGHMEM
11261                 dma_mask = DMA_64BIT_MASK;
11262 #endif
11263         } else
11264                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11265
11266         /* Configure DMA attributes. */
11267         if (dma_mask > DMA_32BIT_MASK) {
11268                 err = pci_set_dma_mask(pdev, dma_mask);
11269                 if (!err) {
11270                         dev->features |= NETIF_F_HIGHDMA;
11271                         err = pci_set_consistent_dma_mask(pdev,
11272                                                           persist_dma_mask);
11273                         if (err < 0) {
11274                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11275                                        "DMA for consistent allocations\n");
11276                                 goto err_out_iounmap;
11277                         }
11278                 }
11279         }
11280         if (err || dma_mask == DMA_32BIT_MASK) {
11281                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11282                 if (err) {
11283                         printk(KERN_ERR PFX "No usable DMA configuration, "
11284                                "aborting.\n");
11285                         goto err_out_iounmap;
11286                 }
11287         }
11288
11289         tg3_init_bufmgr_config(tp);
11290
11291 #if TG3_TSO_SUPPORT != 0
11292         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11293                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11294         }
11295         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11296             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11297             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11298             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11299                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11300         } else {
11301                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11302         }
11303
11304         /* TSO is on by default on chips that support hardware TSO.
11305          * Firmware TSO on older chips gives lower performance, so it
11306          * is off by default, but can be enabled using ethtool.
11307          */
11308         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11309                 dev->features |= NETIF_F_TSO;
11310
11311 #endif
11312
11313         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11314             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11315             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11316                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11317                 tp->rx_pending = 63;
11318         }
11319
11320         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11321             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11322                 tp->pdev_peer = tg3_find_peer(tp);
11323
11324         err = tg3_get_device_address(tp);
11325         if (err) {
11326                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11327                        "aborting.\n");
11328                 goto err_out_iounmap;
11329         }
11330
11331         /*
11332          * Reset chip in case UNDI or EFI driver did not shutdown
11333          * DMA self test will enable WDMAC and we'll see (spurious)
11334          * pending DMA on the PCI bus at that point.
11335          */
11336         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11337             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11338                 pci_save_state(tp->pdev);
11339                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11340                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11341         }
11342
11343         err = tg3_test_dma(tp);
11344         if (err) {
11345                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11346                 goto err_out_iounmap;
11347         }
11348
11349         /* Tigon3 can do ipv4 only... and some chips have buggy
11350          * checksumming.
11351          */
11352         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11353                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11354                         dev->features |= NETIF_F_HW_CSUM;
11355                 else
11356                         dev->features |= NETIF_F_IP_CSUM;
11357                 dev->features |= NETIF_F_SG;
11358                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11359         } else
11360                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11361
11362         /* flow control autonegotiation is default behavior */
11363         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11364
11365         tg3_init_coal(tp);
11366
11367         /* Now that we have fully setup the chip, save away a snapshot
11368          * of the PCI config space.  We need to restore this after
11369          * GRC_MISC_CFG core clock resets and some resume events.
11370          */
11371         pci_save_state(tp->pdev);
11372
11373         err = register_netdev(dev);
11374         if (err) {
11375                 printk(KERN_ERR PFX "Cannot register net device, "
11376                        "aborting.\n");
11377                 goto err_out_iounmap;
11378         }
11379
11380         pci_set_drvdata(pdev, dev);
11381
11382         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11383                dev->name,
11384                tp->board_part_number,
11385                tp->pci_chip_rev_id,
11386                tg3_phy_string(tp),
11387                tg3_bus_string(tp, str),
11388                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11389
11390         for (i = 0; i < 6; i++)
11391                 printk("%2.2x%c", dev->dev_addr[i],
11392                        i == 5 ? '\n' : ':');
11393
11394         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11395                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11396                "TSOcap[%d] \n",
11397                dev->name,
11398                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11399                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11400                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11401                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11402                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11403                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11404                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11405         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11406                dev->name, tp->dma_rwctrl,
11407                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11408                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11409
11410         netif_carrier_off(tp->dev);
11411
11412         return 0;
11413
11414 err_out_iounmap:
11415         if (tp->regs) {
11416                 iounmap(tp->regs);
11417                 tp->regs = NULL;
11418         }
11419
11420 err_out_free_dev:
11421         free_netdev(dev);
11422
11423 err_out_free_res:
11424         pci_release_regions(pdev);
11425
11426 err_out_disable_pdev:
11427         pci_disable_device(pdev);
11428         pci_set_drvdata(pdev, NULL);
11429         return err;
11430 }
11431
11432 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11433 {
11434         struct net_device *dev = pci_get_drvdata(pdev);
11435
11436         if (dev) {
11437                 struct tg3 *tp = netdev_priv(dev);
11438
11439                 flush_scheduled_work();
11440                 unregister_netdev(dev);
11441                 if (tp->regs) {
11442                         iounmap(tp->regs);
11443                         tp->regs = NULL;
11444                 }
11445                 free_netdev(dev);
11446                 pci_release_regions(pdev);
11447                 pci_disable_device(pdev);
11448                 pci_set_drvdata(pdev, NULL);
11449         }
11450 }
11451
11452 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11453 {
11454         struct net_device *dev = pci_get_drvdata(pdev);
11455         struct tg3 *tp = netdev_priv(dev);
11456         int err;
11457
11458         if (!netif_running(dev))
11459                 return 0;
11460
11461         flush_scheduled_work();
11462         tg3_netif_stop(tp);
11463
11464         del_timer_sync(&tp->timer);
11465
11466         tg3_full_lock(tp, 1);
11467         tg3_disable_ints(tp);
11468         tg3_full_unlock(tp);
11469
11470         netif_device_detach(dev);
11471
11472         tg3_full_lock(tp, 0);
11473         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11474         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11475         tg3_full_unlock(tp);
11476
11477         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11478         if (err) {
11479                 tg3_full_lock(tp, 0);
11480
11481                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11482                 tg3_init_hw(tp);
11483
11484                 tp->timer.expires = jiffies + tp->timer_offset;
11485                 add_timer(&tp->timer);
11486
11487                 netif_device_attach(dev);
11488                 tg3_netif_start(tp);
11489
11490                 tg3_full_unlock(tp);
11491         }
11492
11493         return err;
11494 }
11495
11496 static int tg3_resume(struct pci_dev *pdev)
11497 {
11498         struct net_device *dev = pci_get_drvdata(pdev);
11499         struct tg3 *tp = netdev_priv(dev);
11500         int err;
11501
11502         if (!netif_running(dev))
11503                 return 0;
11504
11505         pci_restore_state(tp->pdev);
11506
11507         err = tg3_set_power_state(tp, PCI_D0);
11508         if (err)
11509                 return err;
11510
11511         netif_device_attach(dev);
11512
11513         tg3_full_lock(tp, 0);
11514
11515         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11516         tg3_init_hw(tp);
11517
11518         tp->timer.expires = jiffies + tp->timer_offset;
11519         add_timer(&tp->timer);
11520
11521         tg3_netif_start(tp);
11522
11523         tg3_full_unlock(tp);
11524
11525         return 0;
11526 }
11527
11528 static struct pci_driver tg3_driver = {
11529         .name           = DRV_MODULE_NAME,
11530         .id_table       = tg3_pci_tbl,
11531         .probe          = tg3_init_one,
11532         .remove         = __devexit_p(tg3_remove_one),
11533         .suspend        = tg3_suspend,
11534         .resume         = tg3_resume
11535 };
11536
11537 static int __init tg3_init(void)
11538 {
11539         return pci_module_init(&tg3_driver);
11540 }
11541
11542 static void __exit tg3_cleanup(void)
11543 {
11544         pci_unregister_driver(&tg3_driver);
11545 }
11546
11547 module_init(tg3_init);
11548 module_exit(tg3_cleanup);