1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.59"
73 #define DRV_MODULE_RELDATE      "June 8, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
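/* Note: TX_BUFFS_AVAIL() and NEXT_TX() rely on TG3_TX_RING_SIZE being a
 * power of two, so '& (TG3_TX_RING_SIZE - 1)' is the cheap equivalent of
 * '% TG3_TX_RING_SIZE' (e.g. NEXT_TX(511) == 0).
 */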
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
261           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
263           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
264         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
265           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
266         { 0, }
267 };
268
269 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
270
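/* ethtool statistics strings.  The order here must match the layout of the
 * u64 counters in struct tg3_ethtool_stats, from which TG3_NUM_STATS is
 * derived.
 */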
271 static struct {
272         const char string[ETH_GSTRING_LEN];
273 } ethtool_stats_keys[TG3_NUM_STATS] = {
274         { "rx_octets" },
275         { "rx_fragments" },
276         { "rx_ucast_packets" },
277         { "rx_mcast_packets" },
278         { "rx_bcast_packets" },
279         { "rx_fcs_errors" },
280         { "rx_align_errors" },
281         { "rx_xon_pause_rcvd" },
282         { "rx_xoff_pause_rcvd" },
283         { "rx_mac_ctrl_rcvd" },
284         { "rx_xoff_entered" },
285         { "rx_frame_too_long_errors" },
286         { "rx_jabbers" },
287         { "rx_undersize_packets" },
288         { "rx_in_length_errors" },
289         { "rx_out_length_errors" },
290         { "rx_64_or_less_octet_packets" },
291         { "rx_65_to_127_octet_packets" },
292         { "rx_128_to_255_octet_packets" },
293         { "rx_256_to_511_octet_packets" },
294         { "rx_512_to_1023_octet_packets" },
295         { "rx_1024_to_1522_octet_packets" },
296         { "rx_1523_to_2047_octet_packets" },
297         { "rx_2048_to_4095_octet_packets" },
298         { "rx_4096_to_8191_octet_packets" },
299         { "rx_8192_to_9022_octet_packets" },
300
301         { "tx_octets" },
302         { "tx_collisions" },
303
304         { "tx_xon_sent" },
305         { "tx_xoff_sent" },
306         { "tx_flow_control" },
307         { "tx_mac_errors" },
308         { "tx_single_collisions" },
309         { "tx_mult_collisions" },
310         { "tx_deferred" },
311         { "tx_excessive_collisions" },
312         { "tx_late_collisions" },
313         { "tx_collide_2times" },
314         { "tx_collide_3times" },
315         { "tx_collide_4times" },
316         { "tx_collide_5times" },
317         { "tx_collide_6times" },
318         { "tx_collide_7times" },
319         { "tx_collide_8times" },
320         { "tx_collide_9times" },
321         { "tx_collide_10times" },
322         { "tx_collide_11times" },
323         { "tx_collide_12times" },
324         { "tx_collide_13times" },
325         { "tx_collide_14times" },
326         { "tx_collide_15times" },
327         { "tx_ucast_packets" },
328         { "tx_mcast_packets" },
329         { "tx_bcast_packets" },
330         { "tx_carrier_sense_errors" },
331         { "tx_discards" },
332         { "tx_errors" },
333
334         { "dma_writeq_full" },
335         { "dma_write_prioq_full" },
336         { "rxbds_empty" },
337         { "rx_discards" },
338         { "rx_errors" },
339         { "rx_threshold_hit" },
340
341         { "dma_readq_full" },
342         { "dma_read_prioq_full" },
343         { "tx_comp_queue_full" },
344
345         { "ring_set_send_prod_index" },
346         { "ring_status_update" },
347         { "nic_irqs" },
348         { "nic_avoided_irqs" },
349         { "nic_tx_threshold_hit" }
350 };
351
352 static struct {
353         const char string[ETH_GSTRING_LEN];
354 } ethtool_test_keys[TG3_NUM_TEST] = {
355         { "nvram test     (online) " },
356         { "link test      (online) " },
357         { "register test  (offline)" },
358         { "memory test    (offline)" },
359         { "loopback test  (offline)" },
360         { "interrupt test (offline)" },
361 };
362
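/* Plain MMIO register accessors; tp->regs is the ioremap()'d register BAR. */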
363 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
364 {
365         writel(val, tp->regs + off);
366 }
367
368 static u32 tg3_read32(struct tg3 *tp, u32 off)
369 {
370         return readl(tp->regs + off);
371 }
372
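/* Indirect register access: the target offset goes into the
 * TG3PCI_REG_BASE_ADDR config register and the data moves through
 * TG3PCI_REG_DATA, all under indirect_lock.  Used on chips where posted
 * MMIO writes are not safe.
 */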
373 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
374 {
375         unsigned long flags;
376
377         spin_lock_irqsave(&tp->indirect_lock, flags);
378         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
379         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
380         spin_unlock_irqrestore(&tp->indirect_lock, flags);
381 }
382
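/* Write followed by a read-back of the same register, which forces the
 * posted PCI write out to the chip before returning.
 */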
383 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
384 {
385         writel(val, tp->regs + off);
386         readl(tp->regs + off);
387 }
388
389 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
390 {
391         unsigned long flags;
392         u32 val;
393
394         spin_lock_irqsave(&tp->indirect_lock, flags);
395         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
396         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
397         spin_unlock_irqrestore(&tp->indirect_lock, flags);
398         return val;
399 }
400
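/* Mailbox access through PCI config space.  The receive-return and standard
 * producer mailboxes have dedicated shadow registers in config space; all
 * other mailboxes go through the REG_BASE_ADDR/REG_DATA window, with the
 * mailbox offset rebased by 0x5600 into that window.
 */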
401 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
402 {
403         unsigned long flags;
404
405         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
406                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
407                                        TG3_64BIT_REG_LOW, val);
408                 return;
409         }
410         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
411                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
412                                        TG3_64BIT_REG_LOW, val);
413                 return;
414         }
415
416         spin_lock_irqsave(&tp->indirect_lock, flags);
417         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
418         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
419         spin_unlock_irqrestore(&tp->indirect_lock, flags);
420
421         /* In indirect mode when disabling interrupts, we also need
422          * to clear the interrupt bit in the GRC local ctrl register.
423          */
424         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
425             (val == 0x1)) {
426                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
427                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
428         }
429 }
430
431 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
432 {
433         unsigned long flags;
434         u32 val;
435
436         spin_lock_irqsave(&tp->indirect_lock, flags);
437         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
438         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
439         spin_unlock_irqrestore(&tp->indirect_lock, flags);
440         return val;
441 }
442
443 /* usec_wait specifies the wait time in usec when writing to certain registers
444  * where it is unsafe to read back the register without some delay.
445  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
446  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
447  */
448 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
449 {
450         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
451             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
452                 /* Non-posted methods */
453                 tp->write32(tp, off, val);
454         else {
455                 /* Posted method */
456                 tg3_write32(tp, off, val);
457                 if (usec_wait)
458                         udelay(usec_wait);
459                 tp->read32(tp, off);
460         }
461         /* Wait again after the read for the posted method to guarantee that
462          * the wait time is met.
463          */
464         if (usec_wait)
465                 udelay(usec_wait);
466 }
467
468 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
469 {
470         tp->write32_mbox(tp, off, val);
471         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
472             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
473                 tp->read32_mbox(tp, off);
474 }
475
476 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
477 {
478         void __iomem *mbox = tp->regs + off;
479         writel(val, mbox);
480         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
481                 writel(val, mbox);
482         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
483                 readl(mbox);
484 }
485
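/* Register and mailbox accessor macros.  The read32/write32 function
 * pointers they dispatch through are selected elsewhere at probe time, so
 * the same call sites work on chips that need indirect or non-posted
 * access.
 */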
486 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
487 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
488 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
489 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
490 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
491
492 #define tw32(reg,val)           tp->write32(tp, reg, val)
493 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
494 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
495 #define tr32(reg)               tp->read32(tp, reg)
496
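/* NIC SRAM accessors.  The word is transferred through the memory window
 * (TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA), via PCI config space when
 * TG3_FLAG_SRAM_USE_CONFIG is set, and the window base is always restored
 * to zero afterwards.
 */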
497 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
498 {
499         unsigned long flags;
500
501         spin_lock_irqsave(&tp->indirect_lock, flags);
502         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
503                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
504                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
505
506                 /* Always leave this as zero. */
507                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
508         } else {
509                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
510                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
511
512                 /* Always leave this as zero. */
513                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
514         }
515         spin_unlock_irqrestore(&tp->indirect_lock, flags);
516 }
517
518 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
519 {
520         unsigned long flags;
521
522         spin_lock_irqsave(&tp->indirect_lock, flags);
523         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
524                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
525                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
526
527                 /* Always leave this as zero. */
528                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
529         } else {
530                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
531                 *val = tr32(TG3PCI_MEM_WIN_DATA);
532
533                 /* Always leave this as zero. */
534                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
535         }
536         spin_unlock_irqrestore(&tp->indirect_lock, flags);
537 }
538
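/* Mask the chip's PCI interrupt and write 1 to the interrupt mailbox to
 * tell the hardware that host interrupt handling is disabled.
 */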
539 static void tg3_disable_ints(struct tg3 *tp)
540 {
541         tw32(TG3PCI_MISC_HOST_CTRL,
542              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
543         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
544 }
545
546 static inline void tg3_cond_int(struct tg3 *tp)
547 {
548         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
549             (tp->hw_status->status & SD_STATUS_UPDATED))
550                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
551 }
552
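/* Re-enable interrupts: clear irq_sync, unmask the PCI interrupt, and ack
 * work up to the last seen status tag by writing it (in bits 31:24) to the
 * interrupt mailbox.  One-shot MSI chips get the mailbox write twice.
 * Finally, force an interrupt if a status update is already pending.
 */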
553 static void tg3_enable_ints(struct tg3 *tp)
554 {
555         tp->irq_sync = 0;
556         wmb();
557
558         tw32(TG3PCI_MISC_HOST_CTRL,
559              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
560         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
561                        (tp->last_tag << 24));
562         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
563                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
564                                (tp->last_tag << 24));
565         tg3_cond_int(tp);
566 }
567
568 static inline unsigned int tg3_has_work(struct tg3 *tp)
569 {
570         struct tg3_hw_status *sblk = tp->hw_status;
571         unsigned int work_exists = 0;
572
573         /* check for phy events */
574         if (!(tp->tg3_flags &
575               (TG3_FLAG_USE_LINKCHG_REG |
576                TG3_FLAG_POLL_SERDES))) {
577                 if (sblk->status & SD_STATUS_LINK_CHG)
578                         work_exists = 1;
579         }
580         /* check for RX/TX work to do */
581         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
582             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
583                 work_exists = 1;
584
585         return work_exists;
586 }
587
588 /* tg3_restart_ints
589  *  similar to tg3_enable_ints, but it accurately determines whether there
590  *  is new work pending and can return without flushing the PIO write
591  *  which re-enables interrupts.
592  */
593 static void tg3_restart_ints(struct tg3 *tp)
594 {
595         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
596                      tp->last_tag << 24);
597         mmiowb();
598
599         /* When doing tagged status, this work check is unnecessary.
600          * The last_tag we write above tells the chip which piece of
601          * work we've completed.
602          */
603         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
604             tg3_has_work(tp))
605                 tw32(HOSTCC_MODE, tp->coalesce_mode |
606                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
607 }
608
609 static inline void tg3_netif_stop(struct tg3 *tp)
610 {
611         tp->dev->trans_start = jiffies; /* prevent tx timeout */
612         netif_poll_disable(tp->dev);
613         netif_tx_disable(tp->dev);
614 }
615
616 static inline void tg3_netif_start(struct tg3 *tp)
617 {
618         netif_wake_queue(tp->dev);
619         /* NOTE: unconditional netif_wake_queue is only appropriate
620          * so long as all callers are assured to have free tx slots
621          * (such as after tg3_init_hw)
622          */
623         netif_poll_enable(tp->dev);
624         tp->hw_status->status |= SD_STATUS_UPDATED;
625         tg3_enable_ints(tp);
626 }
627
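/* Select the core clock source via TG3PCI_CLOCK_CTRL; 5780-class chips are
 * left alone.  The writes use tw32_wait_f() because this register needs a
 * settle delay after the clocks change (see the usec_wait comment above).
 */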
628 static void tg3_switch_clocks(struct tg3 *tp)
629 {
630         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
631         u32 orig_clock_ctrl;
632
633         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
634                 return;
635
636         orig_clock_ctrl = clock_ctrl;
637         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
638                        CLOCK_CTRL_CLKRUN_OENABLE |
639                        0x1f);
640         tp->pci_clock_ctrl = clock_ctrl;
641
642         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
643                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
644                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
645                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
646                 }
647         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
648                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
649                             clock_ctrl |
650                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
651                             40);
652                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
653                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
654                             40);
655         }
656         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
657 }
658
659 #define PHY_BUSY_LOOPS  5000
660
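/* MII management access.  Auto-polling is temporarily switched off, the
 * read or write command is issued through MAC_MI_COM, and MI_COM_BUSY is
 * polled for up to PHY_BUSY_LOOPS iterations before giving up with -EBUSY.
 */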
661 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
662 {
663         u32 frame_val;
664         unsigned int loops;
665         int ret;
666
667         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
668                 tw32_f(MAC_MI_MODE,
669                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
670                 udelay(80);
671         }
672
673         *val = 0x0;
674
675         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
676                       MI_COM_PHY_ADDR_MASK);
677         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
678                       MI_COM_REG_ADDR_MASK);
679         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
680
681         tw32_f(MAC_MI_COM, frame_val);
682
683         loops = PHY_BUSY_LOOPS;
684         while (loops != 0) {
685                 udelay(10);
686                 frame_val = tr32(MAC_MI_COM);
687
688                 if ((frame_val & MI_COM_BUSY) == 0) {
689                         udelay(5);
690                         frame_val = tr32(MAC_MI_COM);
691                         break;
692                 }
693                 loops -= 1;
694         }
695
696         ret = -EBUSY;
697         if (loops != 0) {
698                 *val = frame_val & MI_COM_DATA_MASK;
699                 ret = 0;
700         }
701
702         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
703                 tw32_f(MAC_MI_MODE, tp->mi_mode);
704                 udelay(80);
705         }
706
707         return ret;
708 }
709
710 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
711 {
712         u32 frame_val;
713         unsigned int loops;
714         int ret;
715
716         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
717                 tw32_f(MAC_MI_MODE,
718                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
719                 udelay(80);
720         }
721
722         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
723                       MI_COM_PHY_ADDR_MASK);
724         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
725                       MI_COM_REG_ADDR_MASK);
726         frame_val |= (val & MI_COM_DATA_MASK);
727         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
728
729         tw32_f(MAC_MI_COM, frame_val);
730
731         loops = PHY_BUSY_LOOPS;
732         while (loops != 0) {
733                 udelay(10);
734                 frame_val = tr32(MAC_MI_COM);
735                 if ((frame_val & MI_COM_BUSY) == 0) {
736                         udelay(5);
737                         frame_val = tr32(MAC_MI_COM);
738                         break;
739                 }
740                 loops -= 1;
741         }
742
743         ret = -EBUSY;
744         if (loops != 0)
745                 ret = 0;
746
747         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
748                 tw32_f(MAC_MI_MODE, tp->mi_mode);
749                 udelay(80);
750         }
751
752         return ret;
753 }
754
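/* Enable the PHY's Ethernet@WireSpeed feature via the auxiliary control
 * shadow register addressed by the 0x7007 write, unless the chip flags
 * forbid it.
 */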
755 static void tg3_phy_set_wirespeed(struct tg3 *tp)
756 {
757         u32 val;
758
759         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
760                 return;
761
762         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
763             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
764                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
765                              (val | (1 << 15) | (1 << 4)));
766 }
767
768 static int tg3_bmcr_reset(struct tg3 *tp)
769 {
770         u32 phy_control;
771         int limit, err;
772
773         /* OK, reset it, and poll the BMCR_RESET bit until it
774          * clears or we time out.
775          */
776         phy_control = BMCR_RESET;
777         err = tg3_writephy(tp, MII_BMCR, phy_control);
778         if (err != 0)
779                 return -EBUSY;
780
781         limit = 5000;
782         while (limit--) {
783                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
784                 if (err != 0)
785                         return -EBUSY;
786
787                 if ((phy_control & BMCR_RESET) == 0) {
788                         udelay(40);
789                         break;
790                 }
791                 udelay(10);
792         }
793         if (limit <= 0)
794                 return -EBUSY;
795
796         return 0;
797 }
798
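/* Poll PHY register 0x16 until bit 0x1000 clears, i.e. until the DSP macro
 * operation finishes, bounded to 100 attempts.
 */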
799 static int tg3_wait_macro_done(struct tg3 *tp)
800 {
801         int limit = 100;
802
803         while (limit--) {
804                 u32 tmp32;
805
806                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
807                         if ((tmp32 & 0x1000) == 0)
808                                 break;
809                 }
810         }
811         if (limit <= 0)
812                 return -EBUSY;
813
814         return 0;
815 }
816
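/* Write a known test pattern into each of the four DSP channels and read it
 * back.  A mismatch or macro timeout sets *resetp so the caller retries
 * after another PHY reset.
 */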
817 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
818 {
819         static const u32 test_pat[4][6] = {
820         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
821         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
822         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
823         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
824         };
825         int chan;
826
827         for (chan = 0; chan < 4; chan++) {
828                 int i;
829
830                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
831                              (chan * 0x2000) | 0x0200);
832                 tg3_writephy(tp, 0x16, 0x0002);
833
834                 for (i = 0; i < 6; i++)
835                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
836                                      test_pat[chan][i]);
837
838                 tg3_writephy(tp, 0x16, 0x0202);
839                 if (tg3_wait_macro_done(tp)) {
840                         *resetp = 1;
841                         return -EBUSY;
842                 }
843
844                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
845                              (chan * 0x2000) | 0x0200);
846                 tg3_writephy(tp, 0x16, 0x0082);
847                 if (tg3_wait_macro_done(tp)) {
848                         *resetp = 1;
849                         return -EBUSY;
850                 }
851
852                 tg3_writephy(tp, 0x16, 0x0802);
853                 if (tg3_wait_macro_done(tp)) {
854                         *resetp = 1;
855                         return -EBUSY;
856                 }
857
858                 for (i = 0; i < 6; i += 2) {
859                         u32 low, high;
860
861                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
862                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
863                             tg3_wait_macro_done(tp)) {
864                                 *resetp = 1;
865                                 return -EBUSY;
866                         }
867                         low &= 0x7fff;
868                         high &= 0x000f;
869                         if (low != test_pat[chan][i] ||
870                             high != test_pat[chan][i+1]) {
871                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
872                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
873                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
874
875                                 return -EBUSY;
876                         }
877                 }
878         }
879
880         return 0;
881 }
882
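/* Clear the test pattern back out of all four DSP channels. */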
883 static int tg3_phy_reset_chanpat(struct tg3 *tp)
884 {
885         int chan;
886
887         for (chan = 0; chan < 4; chan++) {
888                 int i;
889
890                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
891                              (chan * 0x2000) | 0x0200);
892                 tg3_writephy(tp, 0x16, 0x0002);
893                 for (i = 0; i < 6; i++)
894                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
895                 tg3_writephy(tp, 0x16, 0x0202);
896                 if (tg3_wait_macro_done(tp))
897                         return -EBUSY;
898         }
899
900         return 0;
901 }
902
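/* PHY reset workaround for 5703/5704/5705: reset the PHY, disable the
 * transmitter, force 1000 Mbps full-duplex master mode, then write and
 * verify the DSP test patterns, retrying up to 10 times before restoring
 * the original register settings.
 */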
903 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
904 {
905         u32 reg32, phy9_orig;
906         int retries, do_phy_reset, err;
907
908         retries = 10;
909         do_phy_reset = 1;
910         do {
911                 if (do_phy_reset) {
912                         err = tg3_bmcr_reset(tp);
913                         if (err)
914                                 return err;
915                         do_phy_reset = 0;
916                 }
917
918                 /* Disable transmitter and interrupt.  */
919                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
920                         continue;
921
922                 reg32 |= 0x3000;
923                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
924
925                 /* Set full-duplex, 1000 Mbps.  */
926                 tg3_writephy(tp, MII_BMCR,
927                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
928
929                 /* Set to master mode.  */
930                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
931                         continue;
932
933                 tg3_writephy(tp, MII_TG3_CTRL,
934                              (MII_TG3_CTRL_AS_MASTER |
935                               MII_TG3_CTRL_ENABLE_AS_MASTER));
936
937                 /* Enable SM_DSP_CLOCK and 6dB.  */
938                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
939
940                 /* Block the PHY control access.  */
941                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
942                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
943
944                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
945                 if (!err)
946                         break;
947         } while (--retries);
948
949         err = tg3_phy_reset_chanpat(tp);
950         if (err)
951                 return err;
952
953         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
954         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
955
956         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
957         tg3_writephy(tp, 0x16, 0x0000);
958
959         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
960             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
961                 /* Set Extended packet length bit for jumbo frames */
962                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
963         }
964         else {
965                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
966         }
967
968         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
969
970         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
971                 reg32 &= ~0x3000;
972                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
973         } else if (!err)
974                 err = -EBUSY;
975
976         return err;
977 }
978
979 static void tg3_link_report(struct tg3 *);
980
981 /* This will reset the tigon3 PHY and reapply any chip-specific
982  * workarounds that must follow a PHY reset.
983  */
984 static int tg3_phy_reset(struct tg3 *tp)
985 {
986         u32 phy_status;
987         int err;
988
989         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
990         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
991         if (err != 0)
992                 return -EBUSY;
993
994         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
995                 netif_carrier_off(tp->dev);
996                 tg3_link_report(tp);
997         }
998
999         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1000             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1001             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1002                 err = tg3_phy_reset_5703_4_5(tp);
1003                 if (err)
1004                         return err;
1005                 goto out;
1006         }
1007
1008         err = tg3_bmcr_reset(tp);
1009         if (err)
1010                 return err;
1011
1012 out:
1013         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1014                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1015                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1016                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1017                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1018                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1019                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1020         }
1021         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1022                 tg3_writephy(tp, 0x1c, 0x8d68);
1023                 tg3_writephy(tp, 0x1c, 0x8d68);
1024         }
1025         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1026                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1027                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1028                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1029                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1030                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1031                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1032                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1033                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1034         }
1035         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1036                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1037                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1038                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1039                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1040         }
1041         /* Set Extended packet length bit (bit 14) on all chips that
1042          * support jumbo frames. */
1043         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1044                 /* Cannot do read-modify-write on 5401 */
1045                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1046         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1047                 u32 phy_reg;
1048
1049                 /* Set bit 14 with read-modify-write to preserve other bits */
1050                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1051                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1052                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1053         }
1054
1055         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1056          * jumbo frames transmission.
1057          */
1058         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1059                 u32 phy_reg;
1060
1061                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1062                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1063                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1064         }
1065
1066         tg3_phy_set_wirespeed(tp);
1067         return 0;
1068 }
1069
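/* Configure the GPIOs that control auxiliary (Vaux) power.  On dual-port
 * 5704/5714 boards the peer device's WOL/ASF state is consulted so the two
 * ports do not fight over the shared GPIO settings.
 */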
1070 static void tg3_frob_aux_power(struct tg3 *tp)
1071 {
1072         struct tg3 *tp_peer = tp;
1073
1074         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1075                 return;
1076
1077         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1078             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1079                 struct net_device *dev_peer;
1080
1081                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1082                 /* remove_one() may have been run on the peer. */
1083                 if (!dev_peer)
1084                         tp_peer = tp;
1085                 else
1086                         tp_peer = netdev_priv(dev_peer);
1087         }
1088
1089         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1090             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1091             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1092             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1093                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1094                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1095                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1096                                     (GRC_LCLCTRL_GPIO_OE0 |
1097                                      GRC_LCLCTRL_GPIO_OE1 |
1098                                      GRC_LCLCTRL_GPIO_OE2 |
1099                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1100                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1101                                     100);
1102                 } else {
1103                         u32 no_gpio2;
1104                         u32 grc_local_ctrl = 0;
1105
1106                         if (tp_peer != tp &&
1107                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1108                                 return;
1109
1110                         /* Workaround to prevent overdrawing Amps. */
1111                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1112                             ASIC_REV_5714) {
1113                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1114                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1115                                             grc_local_ctrl, 100);
1116                         }
1117
1118                         /* On 5753 and variants, GPIO2 cannot be used. */
1119                         no_gpio2 = tp->nic_sram_data_cfg &
1120                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1121
1122                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1123                                          GRC_LCLCTRL_GPIO_OE1 |
1124                                          GRC_LCLCTRL_GPIO_OE2 |
1125                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1126                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1127                         if (no_gpio2) {
1128                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1129                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1130                         }
1131                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1132                                                     grc_local_ctrl, 100);
1133
1134                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1135
1136                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1137                                                     grc_local_ctrl, 100);
1138
1139                         if (!no_gpio2) {
1140                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1141                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1142                                             grc_local_ctrl, 100);
1143                         }
1144                 }
1145         } else {
1146                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1147                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1148                         if (tp_peer != tp &&
1149                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1150                                 return;
1151
1152                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1153                                     (GRC_LCLCTRL_GPIO_OE1 |
1154                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1155
1156                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1157                                     GRC_LCLCTRL_GPIO_OE1, 100);
1158
1159                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1160                                     (GRC_LCLCTRL_GPIO_OE1 |
1161                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1162                 }
1163         }
1164 }
1165
1166 static int tg3_setup_phy(struct tg3 *, int);
1167
1168 #define RESET_KIND_SHUTDOWN     0
1169 #define RESET_KIND_INIT         1
1170 #define RESET_KIND_SUSPEND      2
1171
1172 static void tg3_write_sig_post_reset(struct tg3 *, int);
1173 static int tg3_halt_cpu(struct tg3 *, u32);
1174 static int tg3_nvram_lock(struct tg3 *);
1175 static void tg3_nvram_unlock(struct tg3 *);
1176
1177 static void tg3_power_down_phy(struct tg3 *tp)
1178 {
1179         /* The PHY should not be powered down on some chips because
1180          * of bugs.
1181          */
1182         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1183             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1184             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1185              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1186                 return;
1187         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1188 }
1189
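/* Move the chip to the requested PCI power state.  For the low-power states
 * this saves the current link settings, renegotiates down to 10Mb/half on
 * copper, arms magic-packet wake-up in the MAC when WOL is enabled, gates
 * the clocks, powers the PHY down when neither WOL nor ASF needs it, and
 * finally writes PCI_PM_CTRL.
 */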
1190 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1191 {
1192         u32 misc_host_ctrl;
1193         u16 power_control, power_caps;
1194         int pm = tp->pm_cap;
1195
1196         /* Make sure register accesses (indirect or otherwise)
1197          * will function correctly.
1198          */
1199         pci_write_config_dword(tp->pdev,
1200                                TG3PCI_MISC_HOST_CTRL,
1201                                tp->misc_host_ctrl);
1202
1203         pci_read_config_word(tp->pdev,
1204                              pm + PCI_PM_CTRL,
1205                              &power_control);
1206         power_control |= PCI_PM_CTRL_PME_STATUS;
1207         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1208         switch (state) {
1209         case PCI_D0:
1210                 power_control |= 0;
1211                 pci_write_config_word(tp->pdev,
1212                                       pm + PCI_PM_CTRL,
1213                                       power_control);
1214                 udelay(100);    /* Delay after power state change */
1215
1216                 /* Switch out of Vaux if it is not a LOM */
1217                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1218                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1219
1220                 return 0;
1221
1222         case PCI_D1:
1223                 power_control |= 1;
1224                 break;
1225
1226         case PCI_D2:
1227                 power_control |= 2;
1228                 break;
1229
1230         case PCI_D3hot:
1231                 power_control |= 3;
1232                 break;
1233
1234         default:
1235                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1236                        "requested.\n",
1237                        tp->dev->name, state);
1238                 return -EINVAL;
1239         }
1240
1241         power_control |= PCI_PM_CTRL_PME_ENABLE;
1242
1243         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1244         tw32(TG3PCI_MISC_HOST_CTRL,
1245              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1246
1247         if (tp->link_config.phy_is_low_power == 0) {
1248                 tp->link_config.phy_is_low_power = 1;
1249                 tp->link_config.orig_speed = tp->link_config.speed;
1250                 tp->link_config.orig_duplex = tp->link_config.duplex;
1251                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1252         }
1253
1254         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1255                 tp->link_config.speed = SPEED_10;
1256                 tp->link_config.duplex = DUPLEX_HALF;
1257                 tp->link_config.autoneg = AUTONEG_ENABLE;
1258                 tg3_setup_phy(tp, 0);
1259         }
1260
1261         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1262                 int i;
1263                 u32 val;
1264
1265                 for (i = 0; i < 200; i++) {
1266                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1267                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1268                                 break;
1269                         msleep(1);
1270                 }
1271         }
1272         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1273                                              WOL_DRV_STATE_SHUTDOWN |
1274                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1275
1276         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1277
1278         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1279                 u32 mac_mode;
1280
1281                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1282                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1283                         udelay(40);
1284
1285                         mac_mode = MAC_MODE_PORT_MODE_MII;
1286
1287                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1288                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1289                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1290                 } else {
1291                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1292                 }
1293
1294                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1295                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1296
1297                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1298                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1299                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1300
1301                 tw32_f(MAC_MODE, mac_mode);
1302                 udelay(100);
1303
1304                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1305                 udelay(10);
1306         }
1307
1308         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1309             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1310              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1311                 u32 base_val;
1312
1313                 base_val = tp->pci_clock_ctrl;
1314                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1315                              CLOCK_CTRL_TXCLK_DISABLE);
1316
1317                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1318                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1319         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1320                 /* do nothing */
1321         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1322                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1323                 u32 newbits1, newbits2;
1324
1325                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1326                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1327                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1328                                     CLOCK_CTRL_TXCLK_DISABLE |
1329                                     CLOCK_CTRL_ALTCLK);
1330                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1331                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1332                         newbits1 = CLOCK_CTRL_625_CORE;
1333                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1334                 } else {
1335                         newbits1 = CLOCK_CTRL_ALTCLK;
1336                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1337                 }
1338
1339                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1340                             40);
1341
1342                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1343                             40);
1344
1345                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1346                         u32 newbits3;
1347
1348                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1349                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1350                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1351                                             CLOCK_CTRL_TXCLK_DISABLE |
1352                                             CLOCK_CTRL_44MHZ_CORE);
1353                         } else {
1354                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1355                         }
1356
1357                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1358                                     tp->pci_clock_ctrl | newbits3, 40);
1359                 }
1360         }
1361
1362         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1363             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1364                 /* Turn off the PHY */
1365                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1366                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1367                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1368                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1369                         tg3_power_down_phy(tp);
1370                 }
1371         }
1372
1373         tg3_frob_aux_power(tp);
1374
1375         /* Workaround for unstable PLL clock */
1376         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1377             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1378                 u32 val = tr32(0x7d00);
1379
1380                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1381                 tw32(0x7d00, val);
1382                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1383                         int err;
1384
1385                         err = tg3_nvram_lock(tp);
1386                         tg3_halt_cpu(tp, RX_CPU_BASE);
1387                         if (!err)
1388                                 tg3_nvram_unlock(tp);
1389                 }
1390         }
1391
1392         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1393
1394         /* Finally, set the new power state. */
1395         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1396         udelay(100);    /* Delay after power state change */
1397
1398         return 0;
1399 }
1400
1401 static void tg3_link_report(struct tg3 *tp)
1402 {
1403         if (!netif_carrier_ok(tp->dev)) {
1404                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1405         } else {
1406                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1407                        tp->dev->name,
1408                        (tp->link_config.active_speed == SPEED_1000 ?
1409                         1000 :
1410                         (tp->link_config.active_speed == SPEED_100 ?
1411                          100 : 10)),
1412                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1413                         "full" : "half"));
1414
1415                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1416                        "%s for RX.\n",
1417                        tp->dev->name,
1418                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1419                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1420         }
1421 }
1422
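/* Resolve TX/RX flow control from the local and remote pause advertisements,
 * following the usual IEEE 802.3 pause resolution rules as coded below:
 *
 *   local PAUSE  local ASYM_PAUSE  remote PAUSE  remote ASYM_PAUSE   result
 *        1             x                1               x            RX + TX pause
 *        1             1                0               1            RX pause only
 *        0             1                1               1            TX pause only
 *       (any other combination)                                      no pause
 *
 * For 1000BaseX (MII_SERDES) links the 1000BaseX advertisement bits are first
 * mapped onto the equivalent 1000BaseT bits so a single resolution path
 * covers both media types.
 */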
1423 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1424 {
1425         u32 new_tg3_flags = 0;
1426         u32 old_rx_mode = tp->rx_mode;
1427         u32 old_tx_mode = tp->tx_mode;
1428
1429         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1430
1431                 /* Convert 1000BaseX flow control bits to 1000BaseT
1432                  * bits before resolving flow control.
1433                  */
1434                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1435                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1436                                        ADVERTISE_PAUSE_ASYM);
1437                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1438
1439                         if (local_adv & ADVERTISE_1000XPAUSE)
1440                                 local_adv |= ADVERTISE_PAUSE_CAP;
1441                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1442                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1443                         if (remote_adv & LPA_1000XPAUSE)
1444                                 remote_adv |= LPA_PAUSE_CAP;
1445                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1446                                 remote_adv |= LPA_PAUSE_ASYM;
1447                 }
1448
1449                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1450                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1451                                 if (remote_adv & LPA_PAUSE_CAP)
1452                                         new_tg3_flags |=
1453                                                 (TG3_FLAG_RX_PAUSE |
1454                                                 TG3_FLAG_TX_PAUSE);
1455                                 else if (remote_adv & LPA_PAUSE_ASYM)
1456                                         new_tg3_flags |=
1457                                                 (TG3_FLAG_RX_PAUSE);
1458                         } else {
1459                                 if (remote_adv & LPA_PAUSE_CAP)
1460                                         new_tg3_flags |=
1461                                                 (TG3_FLAG_RX_PAUSE |
1462                                                 TG3_FLAG_TX_PAUSE);
1463                         }
1464                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1465                         if ((remote_adv & LPA_PAUSE_CAP) &&
1466                             (remote_adv & LPA_PAUSE_ASYM))
1467                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1468                 }
1469
1470                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1471                 tp->tg3_flags |= new_tg3_flags;
1472         } else {
1473                 new_tg3_flags = tp->tg3_flags;
1474         }
1475
1476         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1477                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1478         else
1479                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1480
1481         if (old_rx_mode != tp->rx_mode) {
1482                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1483         }
1484         
1485         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1486                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1487         else
1488                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1489
1490         if (old_tx_mode != tp->tx_mode) {
1491                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1492         }
1493 }
1494
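/* Decode the speed/duplex field of the Broadcom auxiliary status register
 * (MII_TG3_AUX_STAT) into the generic SPEED_xxx / DUPLEX_xxx values used by
 * the rest of the link code.
 */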
1495 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1496 {
1497         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1498         case MII_TG3_AUX_STAT_10HALF:
1499                 *speed = SPEED_10;
1500                 *duplex = DUPLEX_HALF;
1501                 break;
1502
1503         case MII_TG3_AUX_STAT_10FULL:
1504                 *speed = SPEED_10;
1505                 *duplex = DUPLEX_FULL;
1506                 break;
1507
1508         case MII_TG3_AUX_STAT_100HALF:
1509                 *speed = SPEED_100;
1510                 *duplex = DUPLEX_HALF;
1511                 break;
1512
1513         case MII_TG3_AUX_STAT_100FULL:
1514                 *speed = SPEED_100;
1515                 *duplex = DUPLEX_FULL;
1516                 break;
1517
1518         case MII_TG3_AUX_STAT_1000HALF:
1519                 *speed = SPEED_1000;
1520                 *duplex = DUPLEX_HALF;
1521                 break;
1522
1523         case MII_TG3_AUX_STAT_1000FULL:
1524                 *speed = SPEED_1000;
1525                 *duplex = DUPLEX_FULL;
1526                 break;
1527
1528         default:
1529                 *speed = SPEED_INVALID;
1530                 *duplex = DUPLEX_INVALID;
1531                 break;
1532         }
1533 }
1534
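/* Program the copper PHY advertisement registers for the requested link
 * configuration.  Three cases are handled below: entering low power mode
 * (advertise only 10Mb, plus 100Mb when WOL needs it), autonegotiating the
 * full supported set when no specific speed was requested, and forcing a
 * single speed/duplex.  With autoneg disabled, BMCR is programmed directly
 * after the link is dropped via loopback so the forced mode takes effect
 * cleanly.
 */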
1535 static void tg3_phy_copper_begin(struct tg3 *tp)
1536 {
1537         u32 new_adv;
1538         int i;
1539
1540         if (tp->link_config.phy_is_low_power) {
1541                 /* Entering low power mode.  Disable gigabit and
1542                  * 100baseT advertisements.
1543                  */
1544                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1545
1546                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1547                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1548                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1549                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1550
1551                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1552         } else if (tp->link_config.speed == SPEED_INVALID) {
1553                 tp->link_config.advertising =
1554                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1555                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1556                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1557                          ADVERTISED_Autoneg | ADVERTISED_MII);
1558
1559                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1560                         tp->link_config.advertising &=
1561                                 ~(ADVERTISED_1000baseT_Half |
1562                                   ADVERTISED_1000baseT_Full);
1563
1564                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1565                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1566                         new_adv |= ADVERTISE_10HALF;
1567                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1568                         new_adv |= ADVERTISE_10FULL;
1569                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1570                         new_adv |= ADVERTISE_100HALF;
1571                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1572                         new_adv |= ADVERTISE_100FULL;
1573                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1574
1575                 if (tp->link_config.advertising &
1576                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1577                         new_adv = 0;
1578                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1579                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1580                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1581                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1582                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1583                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1584                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1585                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1586                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1587                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1588                 } else {
1589                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1590                 }
1591         } else {
1592                 /* Asking for a specific link mode. */
1593                 if (tp->link_config.speed == SPEED_1000) {
1594                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1595                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1596
1597                         if (tp->link_config.duplex == DUPLEX_FULL)
1598                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1599                         else
1600                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1601                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1602                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1603                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1604                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1605                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1606                 } else {
1607                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1608
1609                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1610                         if (tp->link_config.speed == SPEED_100) {
1611                                 if (tp->link_config.duplex == DUPLEX_FULL)
1612                                         new_adv |= ADVERTISE_100FULL;
1613                                 else
1614                                         new_adv |= ADVERTISE_100HALF;
1615                         } else {
1616                                 if (tp->link_config.duplex == DUPLEX_FULL)
1617                                         new_adv |= ADVERTISE_10FULL;
1618                                 else
1619                                         new_adv |= ADVERTISE_10HALF;
1620                         }
1621                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1622                 }
1623         }
1624
1625         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1626             tp->link_config.speed != SPEED_INVALID) {
1627                 u32 bmcr, orig_bmcr;
1628
1629                 tp->link_config.active_speed = tp->link_config.speed;
1630                 tp->link_config.active_duplex = tp->link_config.duplex;
1631
1632                 bmcr = 0;
1633                 switch (tp->link_config.speed) {
1634                 default:
1635                 case SPEED_10:
1636                         break;
1637
1638                 case SPEED_100:
1639                         bmcr |= BMCR_SPEED100;
1640                         break;
1641
1642                 case SPEED_1000:
1643                         bmcr |= TG3_BMCR_SPEED1000;
1644                         break;
1645                 }
1646
1647                 if (tp->link_config.duplex == DUPLEX_FULL)
1648                         bmcr |= BMCR_FULLDPLX;
1649
1650                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1651                     (bmcr != orig_bmcr)) {
1652                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1653                         for (i = 0; i < 1500; i++) {
1654                                 u32 tmp;
1655
1656                                 udelay(10);
1657                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1658                                     tg3_readphy(tp, MII_BMSR, &tmp))
1659                                         continue;
1660                                 if (!(tmp & BMSR_LSTATUS)) {
1661                                         udelay(40);
1662                                         break;
1663                                 }
1664                         }
1665                         tg3_writephy(tp, MII_BMCR, bmcr);
1666                         udelay(40);
1667                 }
1668         } else {
1669                 tg3_writephy(tp, MII_BMCR,
1670                              BMCR_ANENABLE | BMCR_ANRESTART);
1671         }
1672 }
1673
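/* Load DSP coefficients into the BCM5401 PHY through the DSP address/data
 * register pair (MII_TG3_DSP_ADDRESS / MII_TG3_DSP_RW_PORT).  The values
 * written below are opaque, vendor-provided magic numbers.
 */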
1674 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1675 {
1676         int err;
1677
1678         /* Turn off tap power management. */
1679         /* Set Extended packet length bit */
1680         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1681
1682         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1683         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1684
1685         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1686         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1687
1688         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1689         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1690
1691         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1692         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1693
1694         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1695         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1696
1697         udelay(40);
1698
1699         return err;
1700 }
1701
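/* Return 1 if the PHY is currently advertising the full 10/100 (and, unless
 * the device is 10/100-only, 1000) ability set; 0 otherwise.  Used to force
 * an autoneg restart when exiting low power mode.
 */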
1702 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1703 {
1704         u32 adv_reg, all_mask;
1705
1706         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1707                 return 0;
1708
1709         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1710                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1711         if ((adv_reg & all_mask) != all_mask)
1712                 return 0;
1713         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1714                 u32 tg3_ctrl;
1715
1716                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1717                         return 0;
1718
1719                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1720                             MII_TG3_CTRL_ADV_1000_FULL);
1721                 if ((tg3_ctrl & all_mask) != all_mask)
1722                         return 0;
1723         }
1724         return 1;
1725 }
1726
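/* Bring up (or re-validate) the link on a copper PHY.  The sequence below is
 * roughly: quiesce MAC events, apply per-chip PHY workarounds, poll BMSR for
 * link, decode speed/duplex from the aux status register, resolve flow
 * control when a full duplex link was autonegotiated, then reprogram
 * MAC_MODE and report any carrier change.
 */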
1727 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1728 {
1729         int current_link_up;
1730         u32 bmsr, dummy;
1731         u16 current_speed;
1732         u8 current_duplex;
1733         int i, err;
1734
1735         tw32(MAC_EVENT, 0);
1736
1737         tw32_f(MAC_STATUS,
1738              (MAC_STATUS_SYNC_CHANGED |
1739               MAC_STATUS_CFG_CHANGED |
1740               MAC_STATUS_MI_COMPLETION |
1741               MAC_STATUS_LNKSTATE_CHANGED));
1742         udelay(40);
1743
1744         tp->mi_mode = MAC_MI_MODE_BASE;
1745         tw32_f(MAC_MI_MODE, tp->mi_mode);
1746         udelay(80);
1747
1748         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1749
1750         /* Some third-party PHYs need to be reset on link going
1751          * down.
1752          */
1753         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1754              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1755              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1756             netif_carrier_ok(tp->dev)) {
1757                 tg3_readphy(tp, MII_BMSR, &bmsr);
1758                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1759                     !(bmsr & BMSR_LSTATUS))
1760                         force_reset = 1;
1761         }
1762         if (force_reset)
1763                 tg3_phy_reset(tp);
1764
1765         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1766                 tg3_readphy(tp, MII_BMSR, &bmsr);
1767                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1768                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1769                         bmsr = 0;
1770
1771                 if (!(bmsr & BMSR_LSTATUS)) {
1772                         err = tg3_init_5401phy_dsp(tp);
1773                         if (err)
1774                                 return err;
1775
1776                         tg3_readphy(tp, MII_BMSR, &bmsr);
1777                         for (i = 0; i < 1000; i++) {
1778                                 udelay(10);
1779                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1780                                     (bmsr & BMSR_LSTATUS)) {
1781                                         udelay(40);
1782                                         break;
1783                                 }
1784                         }
1785
1786                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1787                             !(bmsr & BMSR_LSTATUS) &&
1788                             tp->link_config.active_speed == SPEED_1000) {
1789                                 err = tg3_phy_reset(tp);
1790                                 if (!err)
1791                                         err = tg3_init_5401phy_dsp(tp);
1792                                 if (err)
1793                                         return err;
1794                         }
1795                 }
1796         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1797                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1798                 /* 5701 {A0,B0} CRC bug workaround */
1799                 tg3_writephy(tp, 0x15, 0x0a75);
1800                 tg3_writephy(tp, 0x1c, 0x8c68);
1801                 tg3_writephy(tp, 0x1c, 0x8d68);
1802                 tg3_writephy(tp, 0x1c, 0x8c68);
1803         }
1804
1805         /* Clear pending interrupts... */
1806         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1807         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1808
1809         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1810                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1811         else
1812                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1813
1814         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1815             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1816                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1817                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1818                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1819                 else
1820                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1821         }
1822
1823         current_link_up = 0;
1824         current_speed = SPEED_INVALID;
1825         current_duplex = DUPLEX_INVALID;
1826
1827         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1828                 u32 val;
1829
1830                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1831                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1832                 if (!(val & (1 << 10))) {
1833                         val |= (1 << 10);
1834                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1835                         goto relink;
1836                 }
1837         }
1838
1839         bmsr = 0;
1840         for (i = 0; i < 100; i++) {
1841                 tg3_readphy(tp, MII_BMSR, &bmsr);
1842                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1843                     (bmsr & BMSR_LSTATUS))
1844                         break;
1845                 udelay(40);
1846         }
1847
1848         if (bmsr & BMSR_LSTATUS) {
1849                 u32 aux_stat, bmcr;
1850
1851                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1852                 for (i = 0; i < 2000; i++) {
1853                         udelay(10);
1854                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1855                             aux_stat)
1856                                 break;
1857                 }
1858
1859                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1860                                              &current_speed,
1861                                              &current_duplex);
1862
1863                 bmcr = 0;
1864                 for (i = 0; i < 200; i++) {
1865                         tg3_readphy(tp, MII_BMCR, &bmcr);
1866                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1867                                 continue;
1868                         if (bmcr && bmcr != 0x7fff)
1869                                 break;
1870                         udelay(10);
1871                 }
1872
1873                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1874                         if (bmcr & BMCR_ANENABLE) {
1875                                 current_link_up = 1;
1876
1877                                 /* Force autoneg restart if we are exiting
1878                                  * low power mode.
1879                                  */
1880                                 if (!tg3_copper_is_advertising_all(tp))
1881                                         current_link_up = 0;
1882                         } else {
1883                                 current_link_up = 0;
1884                         }
1885                 } else {
1886                         if (!(bmcr & BMCR_ANENABLE) &&
1887                             tp->link_config.speed == current_speed &&
1888                             tp->link_config.duplex == current_duplex) {
1889                                 current_link_up = 1;
1890                         } else {
1891                                 current_link_up = 0;
1892                         }
1893                 }
1894
1895                 tp->link_config.active_speed = current_speed;
1896                 tp->link_config.active_duplex = current_duplex;
1897         }
1898
1899         if (current_link_up == 1 &&
1900             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1901             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1902                 u32 local_adv, remote_adv;
1903
1904                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1905                         local_adv = 0;
1906                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1907
1908                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1909                         remote_adv = 0;
1910
1911                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1912
1913                 /* If we are not advertising full pause capability,
1914                  * something is wrong.  Bring the link down and reconfigure.
1915                  */
1916                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1917                         current_link_up = 0;
1918                 } else {
1919                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1920                 }
1921         }
1922 relink:
1923         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1924                 u32 tmp;
1925
1926                 tg3_phy_copper_begin(tp);
1927
1928                 tg3_readphy(tp, MII_BMSR, &tmp);
1929                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1930                     (tmp & BMSR_LSTATUS))
1931                         current_link_up = 1;
1932         }
1933
1934         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1935         if (current_link_up == 1) {
1936                 if (tp->link_config.active_speed == SPEED_100 ||
1937                     tp->link_config.active_speed == SPEED_10)
1938                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1939                 else
1940                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1941         } else
1942                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1943
1944         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1945         if (tp->link_config.active_duplex == DUPLEX_HALF)
1946                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1947
1948         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1949         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1950                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1951                     (current_link_up == 1 &&
1952                      tp->link_config.active_speed == SPEED_10))
1953                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1954         } else {
1955                 if (current_link_up == 1)
1956                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1957         }
1958
1959         /* ??? Without this setting Netgear GA302T PHY does not
1960          * ??? send/receive packets...
1961          */
1962         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1963             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1964                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1965                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1966                 udelay(80);
1967         }
1968
1969         tw32_f(MAC_MODE, tp->mac_mode);
1970         udelay(40);
1971
1972         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1973                 /* Polled via timer. */
1974                 tw32_f(MAC_EVENT, 0);
1975         } else {
1976                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1977         }
1978         udelay(40);
1979
1980         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1981             current_link_up == 1 &&
1982             tp->link_config.active_speed == SPEED_1000 &&
1983             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1984              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1985                 udelay(120);
1986                 tw32_f(MAC_STATUS,
1987                      (MAC_STATUS_SYNC_CHANGED |
1988                       MAC_STATUS_CFG_CHANGED));
1989                 udelay(40);
1990                 tg3_write_mem(tp,
1991                               NIC_SRAM_FIRMWARE_MBOX,
1992                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1993         }
1994
1995         if (current_link_up != netif_carrier_ok(tp->dev)) {
1996                 if (current_link_up)
1997                         netif_carrier_on(tp->dev);
1998                 else
1999                         netif_carrier_off(tp->dev);
2000                 tg3_link_report(tp);
2001         }
2002
2003         return 0;
2004 }
2005
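/* Software 1000BaseX auto-negotiation (in the spirit of the IEEE 802.3
 * Clause 37 arbitration state diagram) for fiber devices without usable
 * hardware autoneg.  tg3_fiber_aneg_smachine() advances one state per call
 * and is polled from fiber_autoneg(); the MR_* flags mirror the management
 * register bits of that state diagram.
 */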
2006 struct tg3_fiber_aneginfo {
2007         int state;
2008 #define ANEG_STATE_UNKNOWN              0
2009 #define ANEG_STATE_AN_ENABLE            1
2010 #define ANEG_STATE_RESTART_INIT         2
2011 #define ANEG_STATE_RESTART              3
2012 #define ANEG_STATE_DISABLE_LINK_OK      4
2013 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2014 #define ANEG_STATE_ABILITY_DETECT       6
2015 #define ANEG_STATE_ACK_DETECT_INIT      7
2016 #define ANEG_STATE_ACK_DETECT           8
2017 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2018 #define ANEG_STATE_COMPLETE_ACK         10
2019 #define ANEG_STATE_IDLE_DETECT_INIT     11
2020 #define ANEG_STATE_IDLE_DETECT          12
2021 #define ANEG_STATE_LINK_OK              13
2022 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2023 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2024
2025         u32 flags;
2026 #define MR_AN_ENABLE            0x00000001
2027 #define MR_RESTART_AN           0x00000002
2028 #define MR_AN_COMPLETE          0x00000004
2029 #define MR_PAGE_RX              0x00000008
2030 #define MR_NP_LOADED            0x00000010
2031 #define MR_TOGGLE_TX            0x00000020
2032 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2033 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2034 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2035 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2036 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2037 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2038 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2039 #define MR_TOGGLE_RX            0x00002000
2040 #define MR_NP_RX                0x00004000
2041
2042 #define MR_LINK_OK              0x80000000
2043
2044         unsigned long link_time, cur_time;
2045
2046         u32 ability_match_cfg;
2047         int ability_match_count;
2048
2049         char ability_match, idle_match, ack_match;
2050
2051         u32 txconfig, rxconfig;
2052 #define ANEG_CFG_NP             0x00000080
2053 #define ANEG_CFG_ACK            0x00000040
2054 #define ANEG_CFG_RF2            0x00000020
2055 #define ANEG_CFG_RF1            0x00000010
2056 #define ANEG_CFG_PS2            0x00000001
2057 #define ANEG_CFG_PS1            0x00008000
2058 #define ANEG_CFG_HD             0x00004000
2059 #define ANEG_CFG_FD             0x00002000
2060 #define ANEG_CFG_INVAL          0x00001f06
2061
2062 };
2063 #define ANEG_OK         0
2064 #define ANEG_DONE       1
2065 #define ANEG_TIMER_ENAB 2
2066 #define ANEG_FAILED     -1
2067
2068 #define ANEG_STATE_SETTLE_TIME  10000
2069
2070 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2071                                    struct tg3_fiber_aneginfo *ap)
2072 {
2073         unsigned long delta;
2074         u32 rx_cfg_reg;
2075         int ret;
2076
2077         if (ap->state == ANEG_STATE_UNKNOWN) {
2078                 ap->rxconfig = 0;
2079                 ap->link_time = 0;
2080                 ap->cur_time = 0;
2081                 ap->ability_match_cfg = 0;
2082                 ap->ability_match_count = 0;
2083                 ap->ability_match = 0;
2084                 ap->idle_match = 0;
2085                 ap->ack_match = 0;
2086         }
2087         ap->cur_time++;
2088
2089         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2090                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2091
2092                 if (rx_cfg_reg != ap->ability_match_cfg) {
2093                         ap->ability_match_cfg = rx_cfg_reg;
2094                         ap->ability_match = 0;
2095                         ap->ability_match_count = 0;
2096                 } else {
2097                         if (++ap->ability_match_count > 1) {
2098                                 ap->ability_match = 1;
2099                                 ap->ability_match_cfg = rx_cfg_reg;
2100                         }
2101                 }
2102                 if (rx_cfg_reg & ANEG_CFG_ACK)
2103                         ap->ack_match = 1;
2104                 else
2105                         ap->ack_match = 0;
2106
2107                 ap->idle_match = 0;
2108         } else {
2109                 ap->idle_match = 1;
2110                 ap->ability_match_cfg = 0;
2111                 ap->ability_match_count = 0;
2112                 ap->ability_match = 0;
2113                 ap->ack_match = 0;
2114
2115                 rx_cfg_reg = 0;
2116         }
2117
2118         ap->rxconfig = rx_cfg_reg;
2119         ret = ANEG_OK;
2120
2121         switch(ap->state) {
2122         case ANEG_STATE_UNKNOWN:
2123                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2124                         ap->state = ANEG_STATE_AN_ENABLE;
2125
2126                 /* fallthru */
2127         case ANEG_STATE_AN_ENABLE:
2128                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2129                 if (ap->flags & MR_AN_ENABLE) {
2130                         ap->link_time = 0;
2131                         ap->cur_time = 0;
2132                         ap->ability_match_cfg = 0;
2133                         ap->ability_match_count = 0;
2134                         ap->ability_match = 0;
2135                         ap->idle_match = 0;
2136                         ap->ack_match = 0;
2137
2138                         ap->state = ANEG_STATE_RESTART_INIT;
2139                 } else {
2140                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2141                 }
2142                 break;
2143
2144         case ANEG_STATE_RESTART_INIT:
2145                 ap->link_time = ap->cur_time;
2146                 ap->flags &= ~(MR_NP_LOADED);
2147                 ap->txconfig = 0;
2148                 tw32(MAC_TX_AUTO_NEG, 0);
2149                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2150                 tw32_f(MAC_MODE, tp->mac_mode);
2151                 udelay(40);
2152
2153                 ret = ANEG_TIMER_ENAB;
2154                 ap->state = ANEG_STATE_RESTART;
2155
2156                 /* fallthru */
2157         case ANEG_STATE_RESTART:
2158                 delta = ap->cur_time - ap->link_time;
2159                 if (delta > ANEG_STATE_SETTLE_TIME) {
2160                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2161                 } else {
2162                         ret = ANEG_TIMER_ENAB;
2163                 }
2164                 break;
2165
2166         case ANEG_STATE_DISABLE_LINK_OK:
2167                 ret = ANEG_DONE;
2168                 break;
2169
2170         case ANEG_STATE_ABILITY_DETECT_INIT:
2171                 ap->flags &= ~(MR_TOGGLE_TX);
2172                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2173                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2174                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2175                 tw32_f(MAC_MODE, tp->mac_mode);
2176                 udelay(40);
2177
2178                 ap->state = ANEG_STATE_ABILITY_DETECT;
2179                 break;
2180
2181         case ANEG_STATE_ABILITY_DETECT:
2182                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2183                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2184                 }
2185                 break;
2186
2187         case ANEG_STATE_ACK_DETECT_INIT:
2188                 ap->txconfig |= ANEG_CFG_ACK;
2189                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2190                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2191                 tw32_f(MAC_MODE, tp->mac_mode);
2192                 udelay(40);
2193
2194                 ap->state = ANEG_STATE_ACK_DETECT;
2195
2196                 /* fallthru */
2197         case ANEG_STATE_ACK_DETECT:
2198                 if (ap->ack_match != 0) {
2199                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2200                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2201                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2202                         } else {
2203                                 ap->state = ANEG_STATE_AN_ENABLE;
2204                         }
2205                 } else if (ap->ability_match != 0 &&
2206                            ap->rxconfig == 0) {
2207                         ap->state = ANEG_STATE_AN_ENABLE;
2208                 }
2209                 break;
2210
2211         case ANEG_STATE_COMPLETE_ACK_INIT:
2212                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2213                         ret = ANEG_FAILED;
2214                         break;
2215                 }
2216                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2217                                MR_LP_ADV_HALF_DUPLEX |
2218                                MR_LP_ADV_SYM_PAUSE |
2219                                MR_LP_ADV_ASYM_PAUSE |
2220                                MR_LP_ADV_REMOTE_FAULT1 |
2221                                MR_LP_ADV_REMOTE_FAULT2 |
2222                                MR_LP_ADV_NEXT_PAGE |
2223                                MR_TOGGLE_RX |
2224                                MR_NP_RX);
2225                 if (ap->rxconfig & ANEG_CFG_FD)
2226                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2227                 if (ap->rxconfig & ANEG_CFG_HD)
2228                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2229                 if (ap->rxconfig & ANEG_CFG_PS1)
2230                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2231                 if (ap->rxconfig & ANEG_CFG_PS2)
2232                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2233                 if (ap->rxconfig & ANEG_CFG_RF1)
2234                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2235                 if (ap->rxconfig & ANEG_CFG_RF2)
2236                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2237                 if (ap->rxconfig & ANEG_CFG_NP)
2238                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2239
2240                 ap->link_time = ap->cur_time;
2241
2242                 ap->flags ^= (MR_TOGGLE_TX);
2243                 if (ap->rxconfig & 0x0008)
2244                         ap->flags |= MR_TOGGLE_RX;
2245                 if (ap->rxconfig & ANEG_CFG_NP)
2246                         ap->flags |= MR_NP_RX;
2247                 ap->flags |= MR_PAGE_RX;
2248
2249                 ap->state = ANEG_STATE_COMPLETE_ACK;
2250                 ret = ANEG_TIMER_ENAB;
2251                 break;
2252
2253         case ANEG_STATE_COMPLETE_ACK:
2254                 if (ap->ability_match != 0 &&
2255                     ap->rxconfig == 0) {
2256                         ap->state = ANEG_STATE_AN_ENABLE;
2257                         break;
2258                 }
2259                 delta = ap->cur_time - ap->link_time;
2260                 if (delta > ANEG_STATE_SETTLE_TIME) {
2261                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2262                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2263                         } else {
2264                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2265                                     !(ap->flags & MR_NP_RX)) {
2266                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2267                                 } else {
2268                                         ret = ANEG_FAILED;
2269                                 }
2270                         }
2271                 }
2272                 break;
2273
2274         case ANEG_STATE_IDLE_DETECT_INIT:
2275                 ap->link_time = ap->cur_time;
2276                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2277                 tw32_f(MAC_MODE, tp->mac_mode);
2278                 udelay(40);
2279
2280                 ap->state = ANEG_STATE_IDLE_DETECT;
2281                 ret = ANEG_TIMER_ENAB;
2282                 break;
2283
2284         case ANEG_STATE_IDLE_DETECT:
2285                 if (ap->ability_match != 0 &&
2286                     ap->rxconfig == 0) {
2287                         ap->state = ANEG_STATE_AN_ENABLE;
2288                         break;
2289                 }
2290                 delta = ap->cur_time - ap->link_time;
2291                 if (delta > ANEG_STATE_SETTLE_TIME) {
2292                         /* XXX another gem from the Broadcom driver :( */
2293                         ap->state = ANEG_STATE_LINK_OK;
2294                 }
2295                 break;
2296
2297         case ANEG_STATE_LINK_OK:
2298                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2299                 ret = ANEG_DONE;
2300                 break;
2301
2302         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2303                 /* ??? unimplemented */
2304                 break;
2305
2306         case ANEG_STATE_NEXT_PAGE_WAIT:
2307                 /* ??? unimplemented */
2308                 break;
2309
2310         default:
2311                 ret = ANEG_FAILED;
2312                 break;
2313         }
2314
2315         return ret;
2316 }
2317
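/* Drive the software autoneg state machine to completion.  The MAC is put
 * into GMII mode sending config codes, then the state machine is polled
 * roughly once per microsecond for at most ~195ms.  Returns 1 when autoneg
 * finished and at least one of MR_AN_COMPLETE, MR_LINK_OK or
 * MR_LP_ADV_FULL_DUPLEX was set; the raw flags are passed back via *flags.
 */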
2318 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2319 {
2320         int res = 0;
2321         struct tg3_fiber_aneginfo aninfo;
2322         int status = ANEG_FAILED;
2323         unsigned int tick;
2324         u32 tmp;
2325
2326         tw32_f(MAC_TX_AUTO_NEG, 0);
2327
2328         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2329         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2330         udelay(40);
2331
2332         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2333         udelay(40);
2334
2335         memset(&aninfo, 0, sizeof(aninfo));
2336         aninfo.flags |= MR_AN_ENABLE;
2337         aninfo.state = ANEG_STATE_UNKNOWN;
2338         aninfo.cur_time = 0;
2339         tick = 0;
2340         while (++tick < 195000) {
2341                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2342                 if (status == ANEG_DONE || status == ANEG_FAILED)
2343                         break;
2344
2345                 udelay(1);
2346         }
2347
2348         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2349         tw32_f(MAC_MODE, tp->mac_mode);
2350         udelay(40);
2351
2352         *flags = aninfo.flags;
2353
2354         if (status == ANEG_DONE &&
2355             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2356                              MR_LP_ADV_FULL_DUPLEX)))
2357                 res = 1;
2358
2359         return res;
2360 }
2361
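/* One-time bring-up of the external BCM8002 SERDES PHY: set the PLL lock
 * range, soft-reset the device, select the PMA/Ch 1 registers, cycle POR,
 * and finally deselect the channel register so the PHY ID can be read later.
 */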
2362 static void tg3_init_bcm8002(struct tg3 *tp)
2363 {
2364         u32 mac_status = tr32(MAC_STATUS);
2365         int i;
2366
2367         /* Reset when initializing for the first time or when we have a link. */
2368         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2369             !(mac_status & MAC_STATUS_PCS_SYNCED))
2370                 return;
2371
2372         /* Set PLL lock range. */
2373         tg3_writephy(tp, 0x16, 0x8007);
2374
2375         /* SW reset */
2376         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2377
2378         /* Wait for reset to complete. */
2379         /* XXX schedule_timeout() ... */
2380         for (i = 0; i < 500; i++)
2381                 udelay(10);
2382
2383         /* Config mode; select PMA/Ch 1 regs. */
2384         tg3_writephy(tp, 0x10, 0x8411);
2385
2386         /* Enable auto-lock and comdet, select txclk for tx. */
2387         tg3_writephy(tp, 0x11, 0x0a10);
2388
2389         tg3_writephy(tp, 0x18, 0x00a0);
2390         tg3_writephy(tp, 0x16, 0x41ff);
2391
2392         /* Assert and deassert POR. */
2393         tg3_writephy(tp, 0x13, 0x0400);
2394         udelay(40);
2395         tg3_writephy(tp, 0x13, 0x0000);
2396
2397         tg3_writephy(tp, 0x11, 0x0a50);
2398         udelay(40);
2399         tg3_writephy(tp, 0x11, 0x0a10);
2400
2401         /* Wait for signal to stabilize */
2402         /* XXX schedule_timeout() ... */
2403         for (i = 0; i < 15000; i++)
2404                 udelay(10);
2405
2406         /* Deselect the channel register so we can read the PHYID
2407          * later.
2408          */
2409         tg3_writephy(tp, 0x10, 0x8011);
2410 }
2411
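/* Fiber link setup using the on-chip SG_DIG (SERDES digital) block for
 * hardware autoneg.  With autoneg disabled the block is parked and the link
 * simply follows PCS sync.  Otherwise the expected control word (base
 * 0x81388400 plus the pause bits) is programmed and SG_DIG_STATUS is polled
 * for up to ~200ms; a parallel-detection fallback brings the link up when
 * PCS sync is present without incoming config code words.  Returns 1 if the
 * link came up.
 */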
2412 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2413 {
2414         u32 sg_dig_ctrl, sg_dig_status;
2415         u32 serdes_cfg, expected_sg_dig_ctrl;
2416         int workaround, port_a;
2417         int current_link_up;
2418
2419         serdes_cfg = 0;
2420         expected_sg_dig_ctrl = 0;
2421         workaround = 0;
2422         port_a = 1;
2423         current_link_up = 0;
2424
2425         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2426             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2427                 workaround = 1;
2428                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2429                         port_a = 0;
2430
2431                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2432                 /* preserve bits 20-23 for voltage regulator */
2433                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2434         }
2435
2436         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2437
2438         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2439                 if (sg_dig_ctrl & (1 << 31)) {
2440                         if (workaround) {
2441                                 u32 val = serdes_cfg;
2442
2443                                 if (port_a)
2444                                         val |= 0xc010000;
2445                                 else
2446                                         val |= 0x4010000;
2447                                 tw32_f(MAC_SERDES_CFG, val);
2448                         }
2449                         tw32_f(SG_DIG_CTRL, 0x01388400);
2450                 }
2451                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2452                         tg3_setup_flow_control(tp, 0, 0);
2453                         current_link_up = 1;
2454                 }
2455                 goto out;
2456         }
2457
2458         /* Want auto-negotiation.  */
2459         expected_sg_dig_ctrl = 0x81388400;
2460
2461         /* Pause capability */
2462         expected_sg_dig_ctrl |= (1 << 11);
2463
2464         /* Asymmetric pause */
2465         expected_sg_dig_ctrl |= (1 << 12);
2466
2467         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2468                 if (workaround)
2469                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2470                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2471                 udelay(5);
2472                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2473
2474                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2475         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2476                                  MAC_STATUS_SIGNAL_DET)) {
2477                 int i;
2478
2479                 /* Give it time to negotiate (~200ms) */
2480                 for (i = 0; i < 40000; i++) {
2481                         sg_dig_status = tr32(SG_DIG_STATUS);
2482                         if (sg_dig_status & (0x3))
2483                                 break;
2484                         udelay(5);
2485                 }
2486                 mac_status = tr32(MAC_STATUS);
2487
2488                 if ((sg_dig_status & (1 << 1)) &&
2489                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2490                         u32 local_adv, remote_adv;
2491
2492                         local_adv = ADVERTISE_PAUSE_CAP;
2493                         remote_adv = 0;
2494                         if (sg_dig_status & (1 << 19))
2495                                 remote_adv |= LPA_PAUSE_CAP;
2496                         if (sg_dig_status & (1 << 20))
2497                                 remote_adv |= LPA_PAUSE_ASYM;
2498
2499                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2500                         current_link_up = 1;
2501                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2502                 } else if (!(sg_dig_status & (1 << 1))) {
2503                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2504                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2505                         else {
2506                                 if (workaround) {
2507                                         u32 val = serdes_cfg;
2508
2509                                         if (port_a)
2510                                                 val |= 0xc010000;
2511                                         else
2512                                                 val |= 0x4010000;
2513
2514                                         tw32_f(MAC_SERDES_CFG, val);
2515                                 }
2516
2517                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2518                                 udelay(40);
2519
2520                                 /* Link parallel detection: the link is up
2521                                  * only if we have PCS_SYNC and are not
2522                                  * receiving config code words. */
2523                                 mac_status = tr32(MAC_STATUS);
2524                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2525                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2526                                         tg3_setup_flow_control(tp, 0, 0);
2527                                         current_link_up = 1;
2528                                 }
2529                         }
2530                 }
2531         }
2532
2533 out:
2534         return current_link_up;
2535 }
2536
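/* Fiber link setup without the SG_DIG block.  With autoneg enabled the
 * software state machine is run (falling back to parallel detection when it
 * does not complete but PCS sync is present); otherwise a 1000FD link is
 * simply forced.  Returns 1 if the link came up.
 */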
2537 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2538 {
2539         int current_link_up = 0;
2540
2541         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2542                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2543                 goto out;
2544         }
2545
2546         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2547                 u32 flags;
2548                 int i;
2549   
2550                 if (fiber_autoneg(tp, &flags)) {
2551                         u32 local_adv, remote_adv;
2552
2553                         local_adv = ADVERTISE_PAUSE_CAP;
2554                         remote_adv = 0;
2555                         if (flags & MR_LP_ADV_SYM_PAUSE)
2556                                 remote_adv |= LPA_PAUSE_CAP;
2557                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2558                                 remote_adv |= LPA_PAUSE_ASYM;
2559
2560                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2561
2562                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2563                         current_link_up = 1;
2564                 }
2565                 for (i = 0; i < 30; i++) {
2566                         udelay(20);
2567                         tw32_f(MAC_STATUS,
2568                                (MAC_STATUS_SYNC_CHANGED |
2569                                 MAC_STATUS_CFG_CHANGED));
2570                         udelay(40);
2571                         if ((tr32(MAC_STATUS) &
2572                              (MAC_STATUS_SYNC_CHANGED |
2573                               MAC_STATUS_CFG_CHANGED)) == 0)
2574                                 break;
2575                 }
2576
2577                 mac_status = tr32(MAC_STATUS);
2578                 if (current_link_up == 0 &&
2579                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2580                     !(mac_status & MAC_STATUS_RCVD_CFG))
2581                         current_link_up = 1;
2582         } else {
2583                 /* Forcing 1000FD link up. */
2584                 current_link_up = 1;
2585                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2586
2587                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2588                 udelay(40);
2589         }
2590
2591 out:
2592         return current_link_up;
2593 }
2594
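/* Top-level link setup for TBI/fiber devices.  Returns early when the link
 * is already up and stable; otherwise it reinitializes the BCM8002 if
 * present, runs hardware or software autoneg as appropriate, clears the
 * latched status change bits, and updates carrier state, the LED control
 * and the link report.
 */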
2595 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2596 {
2597         u32 orig_pause_cfg;
2598         u16 orig_active_speed;
2599         u8 orig_active_duplex;
2600         u32 mac_status;
2601         int current_link_up;
2602         int i;
2603
2604         orig_pause_cfg =
2605                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2606                                   TG3_FLAG_TX_PAUSE));
2607         orig_active_speed = tp->link_config.active_speed;
2608         orig_active_duplex = tp->link_config.active_duplex;
2609
2610         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2611             netif_carrier_ok(tp->dev) &&
2612             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2613                 mac_status = tr32(MAC_STATUS);
2614                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2615                                MAC_STATUS_SIGNAL_DET |
2616                                MAC_STATUS_CFG_CHANGED |
2617                                MAC_STATUS_RCVD_CFG);
2618                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2619                                    MAC_STATUS_SIGNAL_DET)) {
2620                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2621                                             MAC_STATUS_CFG_CHANGED));
2622                         return 0;
2623                 }
2624         }
2625
2626         tw32_f(MAC_TX_AUTO_NEG, 0);
2627
2628         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2629         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2630         tw32_f(MAC_MODE, tp->mac_mode);
2631         udelay(40);
2632
2633         if (tp->phy_id == PHY_ID_BCM8002)
2634                 tg3_init_bcm8002(tp);
2635
2636         /* Enable link change event even when serdes polling.  */
2637         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2638         udelay(40);
2639
2640         current_link_up = 0;
2641         mac_status = tr32(MAC_STATUS);
2642
2643         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2644                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2645         else
2646                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2647
2648         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2649         tw32_f(MAC_MODE, tp->mac_mode);
2650         udelay(40);
2651
2652         tp->hw_status->status =
2653                 (SD_STATUS_UPDATED |
2654                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2655
2656         for (i = 0; i < 100; i++) {
2657                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2658                                     MAC_STATUS_CFG_CHANGED));
2659                 udelay(5);
2660                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2661                                          MAC_STATUS_CFG_CHANGED)) == 0)
2662                         break;
2663         }
2664
2665         mac_status = tr32(MAC_STATUS);
2666         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2667                 current_link_up = 0;
2668                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2669                         tw32_f(MAC_MODE, (tp->mac_mode |
2670                                           MAC_MODE_SEND_CONFIGS));
2671                         udelay(1);
2672                         tw32_f(MAC_MODE, tp->mac_mode);
2673                 }
2674         }
2675
2676         if (current_link_up == 1) {
2677                 tp->link_config.active_speed = SPEED_1000;
2678                 tp->link_config.active_duplex = DUPLEX_FULL;
2679                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2680                                     LED_CTRL_LNKLED_OVERRIDE |
2681                                     LED_CTRL_1000MBPS_ON));
2682         } else {
2683                 tp->link_config.active_speed = SPEED_INVALID;
2684                 tp->link_config.active_duplex = DUPLEX_INVALID;
2685                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2686                                     LED_CTRL_LNKLED_OVERRIDE |
2687                                     LED_CTRL_TRAFFIC_OVERRIDE));
2688         }
2689
2690         if (current_link_up != netif_carrier_ok(tp->dev)) {
2691                 if (current_link_up)
2692                         netif_carrier_on(tp->dev);
2693                 else
2694                         netif_carrier_off(tp->dev);
2695                 tg3_link_report(tp);
2696         } else {
2697                 u32 now_pause_cfg =
2698                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2699                                          TG3_FLAG_TX_PAUSE);
2700                 if (orig_pause_cfg != now_pause_cfg ||
2701                     orig_active_speed != tp->link_config.active_speed ||
2702                     orig_active_duplex != tp->link_config.active_duplex)
2703                         tg3_link_report(tp);
2704         }
2705
2706         return 0;
2707 }
2708
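/* Link setup for SERDES devices driven through an MII-style register
 * interface (the MII_SERDES case, e.g. the 5714 handled specially below):
 * the 1000BaseX advertisement registers are programmed over MII and the
 * usual BMCR/BMSR handshake applies.
 */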
2709 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2710 {
2711         int current_link_up, err = 0;
2712         u32 bmsr, bmcr;
2713         u16 current_speed;
2714         u8 current_duplex;
2715
2716         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2717         tw32_f(MAC_MODE, tp->mac_mode);
2718         udelay(40);
2719
2720         tw32(MAC_EVENT, 0);
2721
2722         tw32_f(MAC_STATUS,
2723              (MAC_STATUS_SYNC_CHANGED |
2724               MAC_STATUS_CFG_CHANGED |
2725               MAC_STATUS_MI_COMPLETION |
2726               MAC_STATUS_LNKSTATE_CHANGED));
2727         udelay(40);
2728
2729         if (force_reset)
2730                 tg3_phy_reset(tp);
2731
2732         current_link_up = 0;
2733         current_speed = SPEED_INVALID;
2734         current_duplex = DUPLEX_INVALID;
2735
2736         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2737         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2738         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2739                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2740                         bmsr |= BMSR_LSTATUS;
2741                 else
2742                         bmsr &= ~BMSR_LSTATUS;
2743         }
2744
2745         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2746
2747         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2748             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2749                 /* do nothing, just check for link up at the end */
2750         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2751                 u32 adv, new_adv;
2752
2753                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2754                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2755                                   ADVERTISE_1000XPAUSE |
2756                                   ADVERTISE_1000XPSE_ASYM |
2757                                   ADVERTISE_SLCT);
2758
2759                 /* Always advertise symmetric PAUSE just like copper */
2760                 new_adv |= ADVERTISE_1000XPAUSE;
2761
2762                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2763                         new_adv |= ADVERTISE_1000XHALF;
2764                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2765                         new_adv |= ADVERTISE_1000XFULL;
2766
2767                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2768                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2769                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2770                         tg3_writephy(tp, MII_BMCR, bmcr);
2771
2772                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2773                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2774                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2775
2776                         return err;
2777                 }
2778         } else {
2779                 u32 new_bmcr;
2780
2781                 bmcr &= ~BMCR_SPEED1000;
2782                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2783
2784                 if (tp->link_config.duplex == DUPLEX_FULL)
2785                         new_bmcr |= BMCR_FULLDPLX;
2786
2787                 if (new_bmcr != bmcr) {
2788                         /* BMCR_SPEED1000 is a reserved bit that needs
2789                          * to be set on write.
2790                          */
2791                         new_bmcr |= BMCR_SPEED1000;
2792
2793                         /* Force a linkdown */
2794                         if (netif_carrier_ok(tp->dev)) {
2795                                 u32 adv;
2796
2797                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2798                                 adv &= ~(ADVERTISE_1000XFULL |
2799                                          ADVERTISE_1000XHALF |
2800                                          ADVERTISE_SLCT);
2801                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2802                                 tg3_writephy(tp, MII_BMCR, bmcr |
2803                                                            BMCR_ANRESTART |
2804                                                            BMCR_ANENABLE);
2805                                 udelay(10);
2806                                 netif_carrier_off(tp->dev);
2807                         }
2808                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2809                         bmcr = new_bmcr;
2810                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2811                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2812                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2813                             ASIC_REV_5714) {
2814                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2815                                         bmsr |= BMSR_LSTATUS;
2816                                 else
2817                                         bmsr &= ~BMSR_LSTATUS;
2818                         }
2819                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2820                 }
2821         }
2822
2823         if (bmsr & BMSR_LSTATUS) {
2824                 current_speed = SPEED_1000;
2825                 current_link_up = 1;
2826                 if (bmcr & BMCR_FULLDPLX)
2827                         current_duplex = DUPLEX_FULL;
2828                 else
2829                         current_duplex = DUPLEX_HALF;
2830
2831                 if (bmcr & BMCR_ANENABLE) {
2832                         u32 local_adv, remote_adv, common;
2833
2834                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2835                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2836                         common = local_adv & remote_adv;
2837                         if (common & (ADVERTISE_1000XHALF |
2838                                       ADVERTISE_1000XFULL)) {
2839                                 if (common & ADVERTISE_1000XFULL)
2840                                         current_duplex = DUPLEX_FULL;
2841                                 else
2842                                         current_duplex = DUPLEX_HALF;
2843
2844                                 tg3_setup_flow_control(tp, local_adv,
2845                                                        remote_adv);
2846                         }
2847                         else
2848                                 current_link_up = 0;
2849                 }
2850         }
2851
2852         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2853         if (tp->link_config.active_duplex == DUPLEX_HALF)
2854                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2855
2856         tw32_f(MAC_MODE, tp->mac_mode);
2857         udelay(40);
2858
2859         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2860
2861         tp->link_config.active_speed = current_speed;
2862         tp->link_config.active_duplex = current_duplex;
2863
2864         if (current_link_up != netif_carrier_ok(tp->dev)) {
2865                 if (current_link_up)
2866                         netif_carrier_on(tp->dev);
2867                 else {
2868                         netif_carrier_off(tp->dev);
2869                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2870                 }
2871                 tg3_link_report(tp);
2872         }
2873         return err;
2874 }
2875
2876 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2877 {
2878         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2879                 /* Give autoneg time to complete. */
2880                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2881                 return;
2882         }
2883         if (!netif_carrier_ok(tp->dev) &&
2884             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2885                 u32 bmcr;
2886
2887                 tg3_readphy(tp, MII_BMCR, &bmcr);
2888                 if (bmcr & BMCR_ANENABLE) {
2889                         u32 phy1, phy2;
2890
2891                         /* Select shadow register 0x1f */
2892                         tg3_writephy(tp, 0x1c, 0x7c00);
2893                         tg3_readphy(tp, 0x1c, &phy1);
2894
2895                         /* Select expansion interrupt status register */
2896                         tg3_writephy(tp, 0x17, 0x0f01);
2897                         tg3_readphy(tp, 0x15, &phy2);
2898                         tg3_readphy(tp, 0x15, &phy2);
2899
2900                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2901                                 /* We have signal detect and not receiving
2902                                  * config code words, link is up by parallel
2903                                  * detection.
2904                                  */
2905
2906                                 bmcr &= ~BMCR_ANENABLE;
2907                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2908                                 tg3_writephy(tp, MII_BMCR, bmcr);
2909                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2910                         }
2911                 }
2912         }
2913         else if (netif_carrier_ok(tp->dev) &&
2914                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2915                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2916                 u32 phy2;
2917
2918                 /* Select expansion interrupt status register */
2919                 tg3_writephy(tp, 0x17, 0x0f01);
2920                 tg3_readphy(tp, 0x15, &phy2);
2921                 if (phy2 & 0x20) {
2922                         u32 bmcr;
2923
2924                         /* Config code words received, turn on autoneg. */
2925                         tg3_readphy(tp, MII_BMCR, &bmcr);
2926                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2927
2928                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2929
2930                 }
2931         }
2932 }
2933
2934 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2935 {
2936         int err;
2937
2938         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2939                 err = tg3_setup_fiber_phy(tp, force_reset);
2940         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2941                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2942         } else {
2943                 err = tg3_setup_copper_phy(tp, force_reset);
2944         }
2945
2946         if (tp->link_config.active_speed == SPEED_1000 &&
2947             tp->link_config.active_duplex == DUPLEX_HALF)
2948                 tw32(MAC_TX_LENGTHS,
2949                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2950                       (6 << TX_LENGTHS_IPG_SHIFT) |
2951                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2952         else
2953                 tw32(MAC_TX_LENGTHS,
2954                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2955                       (6 << TX_LENGTHS_IPG_SHIFT) |
2956                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2957
2958         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2959                 if (netif_carrier_ok(tp->dev)) {
2960                         tw32(HOSTCC_STAT_COAL_TICKS,
2961                              tp->coal.stats_block_coalesce_usecs);
2962                 } else {
2963                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2964                 }
2965         }
2966
2967         return err;
2968 }
2969
2970 /* Tigon3 never reports partial packet sends.  So we do not
2971  * need special logic to handle SKBs that have not had all
2972  * of their frags sent yet, like SunGEM does.
2973  */
2974 static void tg3_tx(struct tg3 *tp)
2975 {
2976         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2977         u32 sw_idx = tp->tx_cons;
2978
2979         while (sw_idx != hw_idx) {
2980                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2981                 struct sk_buff *skb = ri->skb;
2982                 int i;
2983
2984                 BUG_ON(skb == NULL);
2985                 pci_unmap_single(tp->pdev,
2986                                  pci_unmap_addr(ri, mapping),
2987                                  skb_headlen(skb),
2988                                  PCI_DMA_TODEVICE);
2989
2990                 ri->skb = NULL;
2991
2992                 sw_idx = NEXT_TX(sw_idx);
2993
2994                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2995                         BUG_ON(sw_idx == hw_idx);
2996
2997                         ri = &tp->tx_buffers[sw_idx];
2998                         BUG_ON(ri->skb != NULL);
2999
3000                         pci_unmap_page(tp->pdev,
3001                                        pci_unmap_addr(ri, mapping),
3002                                        skb_shinfo(skb)->frags[i].size,
3003                                        PCI_DMA_TODEVICE);
3004
3005                         sw_idx = NEXT_TX(sw_idx);
3006                 }
3007
3008                 dev_kfree_skb(skb);
3009         }
3010
3011         tp->tx_cons = sw_idx;
3012
3013         if (unlikely(netif_queue_stopped(tp->dev))) {
3014                 spin_lock(&tp->tx_lock);
3015                 if (netif_queue_stopped(tp->dev) &&
3016                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3017                         netif_wake_queue(tp->dev);
3018                 spin_unlock(&tp->tx_lock);
3019         }
3020 }
3021
3022 /* Returns size of skb allocated or < 0 on error.
3023  *
3024  * We only need to fill in the address because the other members
3025  * of the RX descriptor are invariant, see tg3_init_rings.
3026  *
3027  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3028  * posting buffers we only dirty the first cache line of the RX
3029  * descriptor (containing the address).  Whereas for the RX status
3030  * buffers the cpu only reads the last cacheline of the RX descriptor
3031  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3032  */
3033 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3034                             int src_idx, u32 dest_idx_unmasked)
3035 {
3036         struct tg3_rx_buffer_desc *desc;
3037         struct ring_info *map, *src_map;
3038         struct sk_buff *skb;
3039         dma_addr_t mapping;
3040         int skb_size, dest_idx;
3041
3042         src_map = NULL;
3043         switch (opaque_key) {
3044         case RXD_OPAQUE_RING_STD:
3045                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3046                 desc = &tp->rx_std[dest_idx];
3047                 map = &tp->rx_std_buffers[dest_idx];
3048                 if (src_idx >= 0)
3049                         src_map = &tp->rx_std_buffers[src_idx];
3050                 skb_size = tp->rx_pkt_buf_sz;
3051                 break;
3052
3053         case RXD_OPAQUE_RING_JUMBO:
3054                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3055                 desc = &tp->rx_jumbo[dest_idx];
3056                 map = &tp->rx_jumbo_buffers[dest_idx];
3057                 if (src_idx >= 0)
3058                         src_map = &tp->rx_jumbo_buffers[src_idx];
3059                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3060                 break;
3061
3062         default:
3063                 return -EINVAL;
3064         };
3065
3066         /* Do not overwrite any of the map or rp information
3067          * until we are sure we can commit to a new buffer.
3068          *
3069          * Callers depend upon this behavior and assume that
3070          * we leave everything unchanged if we fail.
3071          */
3072         skb = dev_alloc_skb(skb_size);
3073         if (skb == NULL)
3074                 return -ENOMEM;
3075
3076         skb->dev = tp->dev;
3077         skb_reserve(skb, tp->rx_offset);
3078
3079         mapping = pci_map_single(tp->pdev, skb->data,
3080                                  skb_size - tp->rx_offset,
3081                                  PCI_DMA_FROMDEVICE);
3082
3083         map->skb = skb;
3084         pci_unmap_addr_set(map, mapping, mapping);
3085
3086         if (src_map != NULL)
3087                 src_map->skb = NULL;
3088
3089         desc->addr_hi = ((u64)mapping >> 32);
3090         desc->addr_lo = ((u64)mapping & 0xffffffff);
3091
3092         return skb_size;
3093 }
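
/* Illustrative sketch, not part of the original driver: the descriptor
 * stores the 64-bit bus address as two 32-bit halves, exactly as written
 * above.  The hypothetical helper below shows the split and reassembly.
 */
static inline u64 tg3_example_split_addr(dma_addr_t mapping)
{
        u32 hi = (u64) mapping >> 32;           /* becomes desc->addr_hi */
        u32 lo = (u64) mapping & 0xffffffff;    /* becomes desc->addr_lo */

        return ((u64) hi << 32) | lo;           /* equals the original mapping */
}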
3094
3095 /* We only need to move over the address because the other
3096  * members of the RX descriptor are invariant.  See notes above
3097  * tg3_alloc_rx_skb for full details.
3098  */
3099 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3100                            int src_idx, u32 dest_idx_unmasked)
3101 {
3102         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3103         struct ring_info *src_map, *dest_map;
3104         int dest_idx;
3105
3106         switch (opaque_key) {
3107         case RXD_OPAQUE_RING_STD:
3108                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3109                 dest_desc = &tp->rx_std[dest_idx];
3110                 dest_map = &tp->rx_std_buffers[dest_idx];
3111                 src_desc = &tp->rx_std[src_idx];
3112                 src_map = &tp->rx_std_buffers[src_idx];
3113                 break;
3114
3115         case RXD_OPAQUE_RING_JUMBO:
3116                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3117                 dest_desc = &tp->rx_jumbo[dest_idx];
3118                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3119                 src_desc = &tp->rx_jumbo[src_idx];
3120                 src_map = &tp->rx_jumbo_buffers[src_idx];
3121                 break;
3122
3123         default:
3124                 return;
3125         };
3126
3127         dest_map->skb = src_map->skb;
3128         pci_unmap_addr_set(dest_map, mapping,
3129                            pci_unmap_addr(src_map, mapping));
3130         dest_desc->addr_hi = src_desc->addr_hi;
3131         dest_desc->addr_lo = src_desc->addr_lo;
3132
3133         src_map->skb = NULL;
3134 }
3135
3136 #if TG3_VLAN_TAG_USED
3137 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3138 {
3139         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3140 }
3141 #endif
3142
3143 /* The RX ring scheme is composed of multiple rings which post fresh
3144  * buffers to the chip, and one special ring the chip uses to report
3145  * status back to the host.
3146  *
3147  * The special ring reports the status of received packets to the
3148  * host.  The chip does not write into the original descriptor the
3149  * RX buffer was obtained from.  The chip simply takes the original
3150  * descriptor as provided by the host, updates the status and length
3151  * field, then writes this into the next status ring entry.
3152  *
3153  * Each ring the host uses to post buffers to the chip is described
3154  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3155  * it is first placed into on-chip RAM.  Once the packet's length is
3156  * known, the chip walks down the TG3_BDINFO entries to select the ring:
3157  * each TG3_BDINFO specifies a MAXLEN field, and the first entry whose
3158  * MAXLEN covers the new packet's length is chosen.
3159  *
3160  * The "separate ring for rx status" scheme may sound queer, but it makes
3161  * sense from a cache coherency perspective.  If only the host writes
3162  * to the buffer post rings, and only the chip writes to the rx status
3163  * rings, then cache lines never move beyond shared-modified state.
3164  * If both the host and chip were to write into the same ring, cache line
3165  * eviction could occur since both entities want it in an exclusive state.
3166  */
3167 static int tg3_rx(struct tg3 *tp, int budget)
3168 {
3169         u32 work_mask;
3170         u32 sw_idx = tp->rx_rcb_ptr;
3171         u16 hw_idx;
3172         int received;
3173
3174         hw_idx = tp->hw_status->idx[0].rx_producer;
3175         /*
3176          * We need to order the read of hw_idx and the read of
3177          * the opaque cookie.
3178          */
3179         rmb();
3180         work_mask = 0;
3181         received = 0;
3182         while (sw_idx != hw_idx && budget > 0) {
3183                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3184                 unsigned int len;
3185                 struct sk_buff *skb;
3186                 dma_addr_t dma_addr;
3187                 u32 opaque_key, desc_idx, *post_ptr;
3188
3189                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3190                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3191                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3192                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3193                                                   mapping);
3194                         skb = tp->rx_std_buffers[desc_idx].skb;
3195                         post_ptr = &tp->rx_std_ptr;
3196                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3197                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3198                                                   mapping);
3199                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3200                         post_ptr = &tp->rx_jumbo_ptr;
3201                 }
3202                 else {
3203                         goto next_pkt_nopost;
3204                 }
3205
3206                 work_mask |= opaque_key;
3207
3208                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3209                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3210                 drop_it:
3211                         tg3_recycle_rx(tp, opaque_key,
3212                                        desc_idx, *post_ptr);
3213                 drop_it_no_recycle:
3214                         /* Other statistics are kept track of by the card. */
3215                         tp->net_stats.rx_dropped++;
3216                         goto next_pkt;
3217                 }
3218
3219                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3220
3221                 if (len > RX_COPY_THRESHOLD 
3222                         && tp->rx_offset == 2
3223                         /* rx_offset != 2 iff this is a 5701 card running
3224                          * in PCI-X mode [see tg3_get_invariants()] */
3225                 ) {
3226                         int skb_size;
3227
3228                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3229                                                     desc_idx, *post_ptr);
3230                         if (skb_size < 0)
3231                                 goto drop_it;
3232
3233                         pci_unmap_single(tp->pdev, dma_addr,
3234                                          skb_size - tp->rx_offset,
3235                                          PCI_DMA_FROMDEVICE);
3236
3237                         skb_put(skb, len);
3238                 } else {
3239                         struct sk_buff *copy_skb;
3240
3241                         tg3_recycle_rx(tp, opaque_key,
3242                                        desc_idx, *post_ptr);
3243
3244                         copy_skb = dev_alloc_skb(len + 2);
3245                         if (copy_skb == NULL)
3246                                 goto drop_it_no_recycle;
3247
3248                         copy_skb->dev = tp->dev;
3249                         skb_reserve(copy_skb, 2);
3250                         skb_put(copy_skb, len);
3251                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3252                         memcpy(copy_skb->data, skb->data, len);
3253                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3254
3255                         /* We'll reuse the original ring buffer. */
3256                         skb = copy_skb;
3257                 }
3258
3259                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3260                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3261                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3262                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3263                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3264                 else
3265                         skb->ip_summed = CHECKSUM_NONE;
3266
3267                 skb->protocol = eth_type_trans(skb, tp->dev);
3268 #if TG3_VLAN_TAG_USED
3269                 if (tp->vlgrp != NULL &&
3270                     desc->type_flags & RXD_FLAG_VLAN) {
3271                         tg3_vlan_rx(tp, skb,
3272                                     desc->err_vlan & RXD_VLAN_MASK);
3273                 } else
3274 #endif
3275                         netif_receive_skb(skb);
3276
3277                 tp->dev->last_rx = jiffies;
3278                 received++;
3279                 budget--;
3280
3281 next_pkt:
3282                 (*post_ptr)++;
3283 next_pkt_nopost:
3284                 sw_idx++;
3285                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3286
3287                 /* Refresh hw_idx to see if there is new work */
3288                 if (sw_idx == hw_idx) {
3289                         hw_idx = tp->hw_status->idx[0].rx_producer;
3290                         rmb();
3291                 }
3292         }
3293
3294         /* ACK the status ring. */
3295         tp->rx_rcb_ptr = sw_idx;
3296         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3297
3298         /* Refill RX ring(s). */
3299         if (work_mask & RXD_OPAQUE_RING_STD) {
3300                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3301                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3302                              sw_idx);
3303         }
3304         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3305                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3306                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3307                              sw_idx);
3308         }
3309         mmiowb();
3310
3311         return received;
3312 }
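
/* Illustrative sketch, not part of the original driver: the index
 * arithmetic used by tg3_rx() above, in isolation.  The software consumer
 * index chases the hardware producer index and wraps modulo the ring size,
 * while a separate producer count tracks buffers to repost to the chip.
 * All names below are hypothetical.
 */
static inline u32 tg3_example_rx_walk(u32 sw_idx, u32 hw_idx,
                                      u32 ring_size, u32 *post_ptr)
{
        while (sw_idx != hw_idx) {
                /* ... process one status ring entry here ... */
                (*post_ptr)++;                  /* one more buffer to repost */
                sw_idx = (sw_idx + 1) % ring_size;
        }
        return sw_idx;                          /* new tp->rx_rcb_ptr */
}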
3313
3314 static int tg3_poll(struct net_device *netdev, int *budget)
3315 {
3316         struct tg3 *tp = netdev_priv(netdev);
3317         struct tg3_hw_status *sblk = tp->hw_status;
3318         int done;
3319
3320         /* handle link change and other phy events */
3321         if (!(tp->tg3_flags &
3322               (TG3_FLAG_USE_LINKCHG_REG |
3323                TG3_FLAG_POLL_SERDES))) {
3324                 if (sblk->status & SD_STATUS_LINK_CHG) {
3325                         sblk->status = SD_STATUS_UPDATED |
3326                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3327                         spin_lock(&tp->lock);
3328                         tg3_setup_phy(tp, 0);
3329                         spin_unlock(&tp->lock);
3330                 }
3331         }
3332
3333         /* run TX completion thread */
3334         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3335                 tg3_tx(tp);
3336         }
3337
3338         /* run RX thread, within the bounds set by NAPI.
3339          * All RX "locking" is done by ensuring outside
3340          * code synchronizes with dev->poll()
3341          */
3342         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3343                 int orig_budget = *budget;
3344                 int work_done;
3345
3346                 if (orig_budget > netdev->quota)
3347                         orig_budget = netdev->quota;
3348
3349                 work_done = tg3_rx(tp, orig_budget);
3350
3351                 *budget -= work_done;
3352                 netdev->quota -= work_done;
3353         }
3354
3355         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3356                 tp->last_tag = sblk->status_tag;
3357                 rmb();
3358         } else
3359                 sblk->status &= ~SD_STATUS_UPDATED;
3360
3361         /* if no more work, tell net stack and NIC we're done */
3362         done = !tg3_has_work(tp);
3363         if (done) {
3364                 netif_rx_complete(netdev);
3365                 tg3_restart_ints(tp);
3366         }
3367
3368         return (done ? 0 : 1);
3369 }
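
/* Illustrative sketch, not part of the original driver: the NAPI budget
 * accounting performed by tg3_poll() above, in isolation.  The RX work done
 * is charged against both the caller-wide budget and the per-device quota.
 * Names are hypothetical.
 */
static inline int tg3_example_napi_budget(int *budget, int *quota, int pending)
{
        int limit = min(*budget, *quota);
        int work_done = min(pending, limit);

        *budget -= work_done;
        *quota -= work_done;

        return pending > work_done;             /* non-zero: poll again later */
}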
3370
3371 static void tg3_irq_quiesce(struct tg3 *tp)
3372 {
3373         BUG_ON(tp->irq_sync);
3374
3375         tp->irq_sync = 1;
3376         smp_mb();
3377
3378         synchronize_irq(tp->pdev->irq);
3379 }
3380
3381 static inline int tg3_irq_sync(struct tg3 *tp)
3382 {
3383         return tp->irq_sync;
3384 }
3385
3386 /* Fully shut down all tg3 driver activity elsewhere in the system.
3387  * If irq_sync is non-zero, we must also synchronize with the IRQ
3388  * handler.  Most of the time this is only necessary when shutting
3389  * down the device.
3390  */
3391 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3392 {
3393         if (irq_sync)
3394                 tg3_irq_quiesce(tp);
3395         spin_lock_bh(&tp->lock);
3396         spin_lock(&tp->tx_lock);
3397 }
3398
3399 static inline void tg3_full_unlock(struct tg3 *tp)
3400 {
3401         spin_unlock(&tp->tx_lock);
3402         spin_unlock_bh(&tp->lock);
3403 }
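
/* Typical usage of the two helpers above (illustrative only; the calls
 * shown mirror the tg3_change_mtu() sequence later in this file):
 *
 *      tg3_full_lock(tp, 1);           quiesce the IRQ handler, then take
 *                                      tp->lock and tp->tx_lock
 *      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *      tg3_init_hw(tp, 0);
 *      tg3_full_unlock(tp);            release tx_lock, then lock
 */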
3404
3405 /* One-shot MSI handler - Chip automatically disables interrupt
3406  * after sending MSI so driver doesn't have to do it.
3407  */
3408 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3409 {
3410         struct net_device *dev = dev_id;
3411         struct tg3 *tp = netdev_priv(dev);
3412
3413         prefetch(tp->hw_status);
3414         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3415
3416         if (likely(!tg3_irq_sync(tp)))
3417                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3418
3419         return IRQ_HANDLED;
3420 }
3421
3422 /* MSI ISR - No need to check for interrupt sharing and no need to
3423  * flush status block and interrupt mailbox. PCI ordering rules
3424  * guarantee that MSI will arrive after the status block.
3425  */
3426 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3427 {
3428         struct net_device *dev = dev_id;
3429         struct tg3 *tp = netdev_priv(dev);
3430
3431         prefetch(tp->hw_status);
3432         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3433         /*
3434          * Writing any value to intr-mbox-0 clears PCI INTA# and
3435          * chip-internal interrupt pending events.
3436          * Writing non-zero to intr-mbox-0 additionally tells the
3437          * NIC to stop sending us irqs, engaging "in-intr-handler"
3438          * event coalescing.
3439          */
3440         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3441         if (likely(!tg3_irq_sync(tp)))
3442                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3443
3444         return IRQ_RETVAL(1);
3445 }
3446
3447 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3448 {
3449         struct net_device *dev = dev_id;
3450         struct tg3 *tp = netdev_priv(dev);
3451         struct tg3_hw_status *sblk = tp->hw_status;
3452         unsigned int handled = 1;
3453
3454         /* In INTx mode, it is possible for the interrupt to arrive at
3455          * the CPU before the status block write that precedes it becomes visible.
3456          * Reading the PCI State register will confirm whether the
3457          * interrupt is ours and will flush the status block.
3458          */
3459         if ((sblk->status & SD_STATUS_UPDATED) ||
3460             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3461                 /*
3462                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3463                  * chip-internal interrupt pending events.
3464                  * Writing non-zero to intr-mbox-0 additionally tells the
3465                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3466                  * event coalescing.
3467                  */
3468                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3469                              0x00000001);
3470                 if (tg3_irq_sync(tp))
3471                         goto out;
3472                 sblk->status &= ~SD_STATUS_UPDATED;
3473                 if (likely(tg3_has_work(tp))) {
3474                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3475                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3476                 } else {
3477                         /* No work, shared interrupt perhaps?  re-enable
3478                          * interrupts, and flush that PCI write
3479                          */
3480                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3481                                 0x00000000);
3482                 }
3483         } else {        /* shared interrupt */
3484                 handled = 0;
3485         }
3486 out:
3487         return IRQ_RETVAL(handled);
3488 }
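
/* Illustrative sketch, not part of the original driver: the interrupt
 * mailbox protocol the handlers above rely on, in isolation.  Writing a
 * non-zero value to intr-mbox-0 acks INTA# and masks further chip
 * interrupts; writing zero (with a flush of the posted write) re-enables
 * them.  The helper names are hypothetical.
 */
static inline void tg3_example_mask_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_example_unmask_ints(struct tg3 *tp)
{
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
}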
3489
3490 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3491 {
3492         struct net_device *dev = dev_id;
3493         struct tg3 *tp = netdev_priv(dev);
3494         struct tg3_hw_status *sblk = tp->hw_status;
3495         unsigned int handled = 1;
3496
3497         /* In INTx mode, it is possible for the interrupt to arrive at
3498          * the CPU before the status block write that precedes it becomes visible.
3499          * Reading the PCI State register will confirm whether the
3500          * interrupt is ours and will flush the status block.
3501          */
3502         if ((sblk->status_tag != tp->last_tag) ||
3503             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3504                 /*
3505                  * writing any value to intr-mbox-0 clears PCI INTA# and
3506                  * chip-internal interrupt pending events.
3507                  * writing non-zero to intr-mbox-0 additionally tells the
3508                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3509                  * event coalescing.
3510                  */
3511                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3512                              0x00000001);
3513                 if (tg3_irq_sync(tp))
3514                         goto out;
3515                 if (netif_rx_schedule_prep(dev)) {
3516                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3517                         /* Update last_tag to mark that this status has been
3518                          * seen. Because interrupt may be shared, we may be
3519                          * racing with tg3_poll(), so only update last_tag
3520                          * if tg3_poll() is not scheduled.
3521                          */
3522                         tp->last_tag = sblk->status_tag;
3523                         __netif_rx_schedule(dev);
3524                 }
3525         } else {        /* shared interrupt */
3526                 handled = 0;
3527         }
3528 out:
3529         return IRQ_RETVAL(handled);
3530 }
3531
3532 /* ISR for interrupt test */
3533 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3534                 struct pt_regs *regs)
3535 {
3536         struct net_device *dev = dev_id;
3537         struct tg3 *tp = netdev_priv(dev);
3538         struct tg3_hw_status *sblk = tp->hw_status;
3539
3540         if ((sblk->status & SD_STATUS_UPDATED) ||
3541             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3542                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3543                              0x00000001);
3544                 return IRQ_RETVAL(1);
3545         }
3546         return IRQ_RETVAL(0);
3547 }
3548
3549 static int tg3_init_hw(struct tg3 *, int);
3550 static int tg3_halt(struct tg3 *, int, int);
3551
3552 #ifdef CONFIG_NET_POLL_CONTROLLER
3553 static void tg3_poll_controller(struct net_device *dev)
3554 {
3555         struct tg3 *tp = netdev_priv(dev);
3556
3557         tg3_interrupt(tp->pdev->irq, dev, NULL);
3558 }
3559 #endif
3560
3561 static void tg3_reset_task(void *_data)
3562 {
3563         struct tg3 *tp = _data;
3564         unsigned int restart_timer;
3565
3566         tg3_full_lock(tp, 0);
3567         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3568
3569         if (!netif_running(tp->dev)) {
3570                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3571                 tg3_full_unlock(tp);
3572                 return;
3573         }
3574
3575         tg3_full_unlock(tp);
3576
3577         tg3_netif_stop(tp);
3578
3579         tg3_full_lock(tp, 1);
3580
3581         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3582         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3583
3584         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3585         tg3_init_hw(tp, 1);
3586
3587         tg3_netif_start(tp);
3588
3589         if (restart_timer)
3590                 mod_timer(&tp->timer, jiffies + 1);
3591
3592         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3593
3594         tg3_full_unlock(tp);
3595 }
3596
3597 static void tg3_tx_timeout(struct net_device *dev)
3598 {
3599         struct tg3 *tp = netdev_priv(dev);
3600
3601         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3602                dev->name);
3603
3604         schedule_work(&tp->reset_task);
3605 }
3606
3607 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3608 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3609 {
3610         u32 base = (u32) mapping & 0xffffffff;
3611
3612         return ((base > 0xffffdcc0) &&
3613                 (base + len + 8 < base));
3614 }
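
/* Worked example for the test above (hypothetical values): with
 * base = 0xffffe000 and len = 0x2000 (an 8 KB buffer),
 * base + len + 8 = 0x100000008, which truncates to 0x00000008 as a u32,
 * so (base + len + 8 < base) is true and the buffer is flagged as crossing
 * a 4 GB boundary.  The (base > 0xffffdcc0) pre-check cheaply skips
 * mappings that start far enough (more than ~9 KB) below a boundary that
 * no maximum-sized frame could wrap.
 */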
3615
3616 /* Test for DMA addresses > 40-bit */
3617 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3618                                           int len)
3619 {
3620 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3621         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3622                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3623         return 0;
3624 #else
3625         return 0;
3626 #endif
3627 }
3628
3629 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3630
3631 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3632 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3633                                        u32 last_plus_one, u32 *start,
3634                                        u32 base_flags, u32 mss)
3635 {
3636         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3637         dma_addr_t new_addr = 0;
3638         u32 entry = *start;
3639         int i, ret = 0;
3640
3641         if (!new_skb) {
3642                 ret = -1;
3643         } else {
3644                 /* New SKB is guaranteed to be linear. */
3645                 entry = *start;
3646                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3647                                           PCI_DMA_TODEVICE);
3648                 /* Make sure new skb does not cross any 4G boundaries.
3649                  * Drop the packet if it does.
3650                  */
3651                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3652                         ret = -1;
3653                         dev_kfree_skb(new_skb);
3654                         new_skb = NULL;
3655                 } else {
3656                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3657                                     base_flags, 1 | (mss << 1));
3658                         *start = NEXT_TX(entry);
3659                 }
3660         }
3661
3662         /* Now clean up the sw ring entries. */
3663         i = 0;
3664         while (entry != last_plus_one) {
3665                 int len;
3666
3667                 if (i == 0)
3668                         len = skb_headlen(skb);
3669                 else
3670                         len = skb_shinfo(skb)->frags[i-1].size;
3671                 pci_unmap_single(tp->pdev,
3672                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3673                                  len, PCI_DMA_TODEVICE);
3674                 if (i == 0) {
3675                         tp->tx_buffers[entry].skb = new_skb;
3676                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3677                 } else {
3678                         tp->tx_buffers[entry].skb = NULL;
3679                 }
3680                 entry = NEXT_TX(entry);
3681                 i++;
3682         }
3683
3684         dev_kfree_skb(skb);
3685
3686         return ret;
3687 }
3688
3689 static void tg3_set_txd(struct tg3 *tp, int entry,
3690                         dma_addr_t mapping, int len, u32 flags,
3691                         u32 mss_and_is_end)
3692 {
3693         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3694         int is_end = (mss_and_is_end & 0x1);
3695         u32 mss = (mss_and_is_end >> 1);
3696         u32 vlan_tag = 0;
3697
3698         if (is_end)
3699                 flags |= TXD_FLAG_END;
3700         if (flags & TXD_FLAG_VLAN) {
3701                 vlan_tag = flags >> 16;
3702                 flags &= 0xffff;
3703         }
3704         vlan_tag |= (mss << TXD_MSS_SHIFT);
3705
3706         txd->addr_hi = ((u64) mapping >> 32);
3707         txd->addr_lo = ((u64) mapping & 0xffffffff);
3708         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3709         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3710 }
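
/* Illustrative sketch, not part of the original driver: how callers build
 * the mss_and_is_end argument unpacked above.  Bit 0 carries the "last
 * fragment" flag and the remaining bits carry the MSS, matching the
 * (i == last) | (mss << 1) expressions used by the xmit routines below.
 * The helper name is hypothetical.
 */
static inline u32 tg3_example_pack_mss(u32 mss, int is_last_frag)
{
        return (is_last_frag ? 1 : 0) | (mss << 1);
}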
3711
3712 /* hard_start_xmit for devices that don't have any bugs and
3713  * support TG3_FLG2_HW_TSO_2 only.
3714  */
3715 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3716 {
3717         struct tg3 *tp = netdev_priv(dev);
3718         dma_addr_t mapping;
3719         u32 len, entry, base_flags, mss;
3720
3721         len = skb_headlen(skb);
3722
3723         /* No BH disabling for tx_lock here.  We are running in BH disabled
3724          * context and TX reclaim runs via tp->poll inside of a software
3725          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3726          * no IRQ context deadlocks to worry about either.  Rejoice!
3727          */
3728         if (!spin_trylock(&tp->tx_lock))
3729                 return NETDEV_TX_LOCKED;
3730
3731         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3732                 if (!netif_queue_stopped(dev)) {
3733                         netif_stop_queue(dev);
3734
3735                         /* This is a hard error, log it. */
3736                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3737                                "queue awake!\n", dev->name);
3738                 }
3739                 spin_unlock(&tp->tx_lock);
3740                 return NETDEV_TX_BUSY;
3741         }
3742
3743         entry = tp->tx_prod;
3744         base_flags = 0;
3745 #if TG3_TSO_SUPPORT != 0
3746         mss = 0;
3747         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3748             (mss = skb_shinfo(skb)->tso_size) != 0) {
3749                 int tcp_opt_len, ip_tcp_len;
3750
3751                 if (skb_header_cloned(skb) &&
3752                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3753                         dev_kfree_skb(skb);
3754                         goto out_unlock;
3755                 }
3756
3757                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3758                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3759
3760                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3761                                TXD_FLAG_CPU_POST_DMA);
3762
3763                 skb->nh.iph->check = 0;
3764                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3765
3766                 skb->h.th->check = 0;
3767
3768                 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3769         }
3770         else if (skb->ip_summed == CHECKSUM_HW)
3771                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3772 #else
3773         mss = 0;
3774         if (skb->ip_summed == CHECKSUM_HW)
3775                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3776 #endif
3777 #if TG3_VLAN_TAG_USED
3778         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3779                 base_flags |= (TXD_FLAG_VLAN |
3780                                (vlan_tx_tag_get(skb) << 16));
3781 #endif
3782
3783         /* Queue skb data, a.k.a. the main skb fragment. */
3784         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3785
3786         tp->tx_buffers[entry].skb = skb;
3787         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3788
3789         tg3_set_txd(tp, entry, mapping, len, base_flags,
3790                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3791
3792         entry = NEXT_TX(entry);
3793
3794         /* Now loop through additional data fragments, and queue them. */
3795         if (skb_shinfo(skb)->nr_frags > 0) {
3796                 unsigned int i, last;
3797
3798                 last = skb_shinfo(skb)->nr_frags - 1;
3799                 for (i = 0; i <= last; i++) {
3800                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3801
3802                         len = frag->size;
3803                         mapping = pci_map_page(tp->pdev,
3804                                                frag->page,
3805                                                frag->page_offset,
3806                                                len, PCI_DMA_TODEVICE);
3807
3808                         tp->tx_buffers[entry].skb = NULL;
3809                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3810
3811                         tg3_set_txd(tp, entry, mapping, len,
3812                                     base_flags, (i == last) | (mss << 1));
3813
3814                         entry = NEXT_TX(entry);
3815                 }
3816         }
3817
3818         /* Packets are ready, update Tx producer idx local and on card. */
3819         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3820
3821         tp->tx_prod = entry;
3822         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3823                 netif_stop_queue(dev);
3824                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3825                         netif_wake_queue(tp->dev);
3826         }
3827
3828 out_unlock:
3829         mmiowb();
3830         spin_unlock(&tp->tx_lock);
3831
3832         dev->trans_start = jiffies;
3833
3834         return NETDEV_TX_OK;
3835 }
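
/* Worked example for the TSO path above (hypothetical frame): with a plain
 * IPv4 header (ihl = 5, 20 bytes) and a TCP header without options
 * (tcp_opt_len = 0), ip_tcp_len = 20 + 20 = 40, so the mss passed to
 * tg3_set_txd() becomes
 *
 *      mss | (40 << 9)
 *
 * i.e. the segment size in the low bits and the total L3+L4 header length
 * starting at bit 9, which is the layout this HW_TSO_2-only routine uses.
 */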
3836
3837 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3838  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3839  */
3840 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3841 {
3842         struct tg3 *tp = netdev_priv(dev);
3843         dma_addr_t mapping;
3844         u32 len, entry, base_flags, mss;
3845         int would_hit_hwbug;
3846
3847         len = skb_headlen(skb);
3848
3849         /* No BH disabling for tx_lock here.  We are running in BH disabled
3850          * context and TX reclaim runs via tp->poll inside of a software
3851          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3852          * no IRQ context deadlocks to worry about either.  Rejoice!
3853          */
3854         if (!spin_trylock(&tp->tx_lock))
3855                 return NETDEV_TX_LOCKED; 
3856
3857         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3858                 if (!netif_queue_stopped(dev)) {
3859                         netif_stop_queue(dev);
3860
3861                         /* This is a hard error, log it. */
3862                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3863                                "queue awake!\n", dev->name);
3864                 }
3865                 spin_unlock(&tp->tx_lock);
3866                 return NETDEV_TX_BUSY;
3867         }
3868
3869         entry = tp->tx_prod;
3870         base_flags = 0;
3871         if (skb->ip_summed == CHECKSUM_HW)
3872                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3873 #if TG3_TSO_SUPPORT != 0
3874         mss = 0;
3875         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3876             (mss = skb_shinfo(skb)->tso_size) != 0) {
3877                 int tcp_opt_len, ip_tcp_len;
3878
3879                 if (skb_header_cloned(skb) &&
3880                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3881                         dev_kfree_skb(skb);
3882                         goto out_unlock;
3883                 }
3884
3885                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3886                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3887
3888                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3889                                TXD_FLAG_CPU_POST_DMA);
3890
3891                 skb->nh.iph->check = 0;
3892                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3893                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3894                         skb->h.th->check = 0;
3895                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3896                 }
3897                 else {
3898                         skb->h.th->check =
3899                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3900                                                    skb->nh.iph->daddr,
3901                                                    0, IPPROTO_TCP, 0);
3902                 }
3903
3904                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3905                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3906                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3907                                 int tsflags;
3908
3909                                 tsflags = ((skb->nh.iph->ihl - 5) +
3910                                            (tcp_opt_len >> 2));
3911                                 mss |= (tsflags << 11);
3912                         }
3913                 } else {
3914                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3915                                 int tsflags;
3916
3917                                 tsflags = ((skb->nh.iph->ihl - 5) +
3918                                            (tcp_opt_len >> 2));
3919                                 base_flags |= tsflags << 12;
3920                         }
3921                 }
3922         }
3923 #else
3924         mss = 0;
3925 #endif
3926 #if TG3_VLAN_TAG_USED
3927         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3928                 base_flags |= (TXD_FLAG_VLAN |
3929                                (vlan_tx_tag_get(skb) << 16));
3930 #endif
3931
3932         /* Queue skb data, a.k.a. the main skb fragment. */
3933         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3934
3935         tp->tx_buffers[entry].skb = skb;
3936         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3937
3938         would_hit_hwbug = 0;
3939
3940         if (tg3_4g_overflow_test(mapping, len))
3941                 would_hit_hwbug = 1;
3942
3943         tg3_set_txd(tp, entry, mapping, len, base_flags,
3944                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3945
3946         entry = NEXT_TX(entry);
3947
3948         /* Now loop through additional data fragments, and queue them. */
3949         if (skb_shinfo(skb)->nr_frags > 0) {
3950                 unsigned int i, last;
3951
3952                 last = skb_shinfo(skb)->nr_frags - 1;
3953                 for (i = 0; i <= last; i++) {
3954                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3955
3956                         len = frag->size;
3957                         mapping = pci_map_page(tp->pdev,
3958                                                frag->page,
3959                                                frag->page_offset,
3960                                                len, PCI_DMA_TODEVICE);
3961
3962                         tp->tx_buffers[entry].skb = NULL;
3963                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3964
3965                         if (tg3_4g_overflow_test(mapping, len))
3966                                 would_hit_hwbug = 1;
3967
3968                         if (tg3_40bit_overflow_test(tp, mapping, len))
3969                                 would_hit_hwbug = 1;
3970
3971                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3972                                 tg3_set_txd(tp, entry, mapping, len,
3973                                             base_flags, (i == last)|(mss << 1));
3974                         else
3975                                 tg3_set_txd(tp, entry, mapping, len,
3976                                             base_flags, (i == last));
3977
3978                         entry = NEXT_TX(entry);
3979                 }
3980         }
3981
3982         if (would_hit_hwbug) {
3983                 u32 last_plus_one = entry;
3984                 u32 start;
3985
3986                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3987                 start &= (TG3_TX_RING_SIZE - 1);
3988
3989                 /* If the workaround fails due to memory/mapping
3990                  * failure, silently drop this packet.
3991                  */
3992                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3993                                                 &start, base_flags, mss))
3994                         goto out_unlock;
3995
3996                 entry = start;
3997         }
3998
3999         /* Packets are ready, update Tx producer idx local and on card. */
4000         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4001
4002         tp->tx_prod = entry;
4003         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
4004                 netif_stop_queue(dev);
4005                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
4006                         netif_wake_queue(tp->dev);
4007         }
4008
4009 out_unlock:
4010         mmiowb();
4011         spin_unlock(&tp->tx_lock);
4012
4013         dev->trans_start = jiffies;
4014
4015         return NETDEV_TX_OK;
4016 }
4017
4018 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4019                                int new_mtu)
4020 {
4021         dev->mtu = new_mtu;
4022
4023         if (new_mtu > ETH_DATA_LEN) {
4024                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4025                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4026                         ethtool_op_set_tso(dev, 0);
4027                 }
4028                 else
4029                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4030         } else {
4031                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4032                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4033                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4034         }
4035 }
4036
4037 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4038 {
4039         struct tg3 *tp = netdev_priv(dev);
4040
4041         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4042                 return -EINVAL;
4043
4044         if (!netif_running(dev)) {
4045                 /* We'll just catch it later when the
4046                  * device is brought up.
4047                  */
4048                 tg3_set_mtu(dev, tp, new_mtu);
4049                 return 0;
4050         }
4051
4052         tg3_netif_stop(tp);
4053
4054         tg3_full_lock(tp, 1);
4055
4056         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4057
4058         tg3_set_mtu(dev, tp, new_mtu);
4059
4060         tg3_init_hw(tp, 0);
4061
4062         tg3_netif_start(tp);
4063
4064         tg3_full_unlock(tp);
4065
4066         return 0;
4067 }
4068
4069 /* Free up pending packets in all rx/tx rings.
4070  *
4071  * The chip has been shut down and the driver detached from
4072  * the networking, so no interrupts or new tx packets will
4073  * end up in the driver.  tp->{tx,}lock is not held and we are not
4074  * in an interrupt context and thus may sleep.
4075  */
4076 static void tg3_free_rings(struct tg3 *tp)
4077 {
4078         struct ring_info *rxp;
4079         int i;
4080
4081         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4082                 rxp = &tp->rx_std_buffers[i];
4083
4084                 if (rxp->skb == NULL)
4085                         continue;
4086                 pci_unmap_single(tp->pdev,
4087                                  pci_unmap_addr(rxp, mapping),
4088                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4089                                  PCI_DMA_FROMDEVICE);
4090                 dev_kfree_skb_any(rxp->skb);
4091                 rxp->skb = NULL;
4092         }
4093
4094         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4095                 rxp = &tp->rx_jumbo_buffers[i];
4096
4097                 if (rxp->skb == NULL)
4098                         continue;
4099                 pci_unmap_single(tp->pdev,
4100                                  pci_unmap_addr(rxp, mapping),
4101                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4102                                  PCI_DMA_FROMDEVICE);
4103                 dev_kfree_skb_any(rxp->skb);
4104                 rxp->skb = NULL;
4105         }
4106
4107         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4108                 struct tx_ring_info *txp;
4109                 struct sk_buff *skb;
4110                 int j;
4111
4112                 txp = &tp->tx_buffers[i];
4113                 skb = txp->skb;
4114
4115                 if (skb == NULL) {
4116                         i++;
4117                         continue;
4118                 }
4119
4120                 pci_unmap_single(tp->pdev,
4121                                  pci_unmap_addr(txp, mapping),
4122                                  skb_headlen(skb),
4123                                  PCI_DMA_TODEVICE);
4124                 txp->skb = NULL;
4125
4126                 i++;
4127
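                     /* Each fragment of the skb occupies its own tx
                      * descriptor right after the head descriptor; unmap
                      * them as well, masking the index so it wraps at the
                      * end of the ring.
                      */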
4128                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4129                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4130                         pci_unmap_page(tp->pdev,
4131                                        pci_unmap_addr(txp, mapping),
4132                                        skb_shinfo(skb)->frags[j].size,
4133                                        PCI_DMA_TODEVICE);
4134                         i++;
4135                 }
4136
4137                 dev_kfree_skb_any(skb);
4138         }
4139 }
4140
4141 /* Initialize tx/rx rings for packet processing.
4142  *
4143  * The chip has been shut down and the driver detached from
4144  * the networking core, so no interrupts or new tx packets will
4145  * end up in the driver.  tp->{tx,}lock are held and thus
4146  * we may not sleep.
4147  */
4148 static void tg3_init_rings(struct tg3 *tp)
4149 {
4150         u32 i;
4151
4152         /* Free up all the SKBs. */
4153         tg3_free_rings(tp);
4154
4155         /* Zero out all descriptors. */
4156         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4157         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4158         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4159         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4160
4161         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4162         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4163             (tp->dev->mtu > ETH_DATA_LEN))
4164                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4165
4166         /* Initialize invariants of the rings; we only set this
4167          * stuff once.  This works because the card does not
4168          * write into the rx buffer posting rings.
4169          */
4170         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4171                 struct tg3_rx_buffer_desc *rxd;
4172
4173                 rxd = &tp->rx_std[i];
4174                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4175                         << RXD_LEN_SHIFT;
4176                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4177                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4178                                (i << RXD_OPAQUE_INDEX_SHIFT));
4179         }
4180
4181         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4182                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4183                         struct tg3_rx_buffer_desc *rxd;
4184
4185                         rxd = &tp->rx_jumbo[i];
4186                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4187                                 << RXD_LEN_SHIFT;
4188                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4189                                 RXD_FLAG_JUMBO;
4190                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4191                                (i << RXD_OPAQUE_INDEX_SHIFT));
4192                 }
4193         }
4194
4195         /* Now allocate fresh SKBs for each rx ring. */
4196         for (i = 0; i < tp->rx_pending; i++) {
4197                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4198                                      -1, i) < 0)
4199                         break;
4200         }
4201
4202         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4203                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4204                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4205                                              -1, i) < 0)
4206                                 break;
4207                 }
4208         }
4209 }
4210
4211 /*
4212  * Must not be invoked with interrupt sources disabled and
4213  * the hardware shut down.
4214  */
4215 static void tg3_free_consistent(struct tg3 *tp)
4216 {
4217         kfree(tp->rx_std_buffers);
4218         tp->rx_std_buffers = NULL;
4219         if (tp->rx_std) {
4220                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4221                                     tp->rx_std, tp->rx_std_mapping);
4222                 tp->rx_std = NULL;
4223         }
4224         if (tp->rx_jumbo) {
4225                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4226                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4227                 tp->rx_jumbo = NULL;
4228         }
4229         if (tp->rx_rcb) {
4230                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4231                                     tp->rx_rcb, tp->rx_rcb_mapping);
4232                 tp->rx_rcb = NULL;
4233         }
4234         if (tp->tx_ring) {
4235                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4236                         tp->tx_ring, tp->tx_desc_mapping);
4237                 tp->tx_ring = NULL;
4238         }
4239         if (tp->hw_status) {
4240                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4241                                     tp->hw_status, tp->status_mapping);
4242                 tp->hw_status = NULL;
4243         }
4244         if (tp->hw_stats) {
4245                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4246                                     tp->hw_stats, tp->stats_mapping);
4247                 tp->hw_stats = NULL;
4248         }
4249 }
4250
4251 /*
4252  * Must not be invoked with interrupt sources disabled and
4253  * the hardware shut down.  Can sleep.
4254  */
4255 static int tg3_alloc_consistent(struct tg3 *tp)
4256 {
4257         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4258                                       (TG3_RX_RING_SIZE +
4259                                        TG3_RX_JUMBO_RING_SIZE)) +
4260                                      (sizeof(struct tx_ring_info) *
4261                                       TG3_TX_RING_SIZE),
4262                                      GFP_KERNEL);
4263         if (!tp->rx_std_buffers)
4264                 return -ENOMEM;
4265
4266         memset(tp->rx_std_buffers, 0,
4267                (sizeof(struct ring_info) *
4268                 (TG3_RX_RING_SIZE +
4269                  TG3_RX_JUMBO_RING_SIZE)) +
4270                (sizeof(struct tx_ring_info) *
4271                 TG3_TX_RING_SIZE));
4272
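             /* Carve the single allocation into three arrays: standard rx
              * ring_info, jumbo rx ring_info, then tx_ring_info.
              */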
4273         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4274         tp->tx_buffers = (struct tx_ring_info *)
4275                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4276
4277         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4278                                           &tp->rx_std_mapping);
4279         if (!tp->rx_std)
4280                 goto err_out;
4281
4282         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4283                                             &tp->rx_jumbo_mapping);
4284
4285         if (!tp->rx_jumbo)
4286                 goto err_out;
4287
4288         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4289                                           &tp->rx_rcb_mapping);
4290         if (!tp->rx_rcb)
4291                 goto err_out;
4292
4293         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4294                                            &tp->tx_desc_mapping);
4295         if (!tp->tx_ring)
4296                 goto err_out;
4297
4298         tp->hw_status = pci_alloc_consistent(tp->pdev,
4299                                              TG3_HW_STATUS_SIZE,
4300                                              &tp->status_mapping);
4301         if (!tp->hw_status)
4302                 goto err_out;
4303
4304         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4305                                             sizeof(struct tg3_hw_stats),
4306                                             &tp->stats_mapping);
4307         if (!tp->hw_stats)
4308                 goto err_out;
4309
4310         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4311         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4312
4313         return 0;
4314
4315 err_out:
4316         tg3_free_consistent(tp);
4317         return -ENOMEM;
4318 }
4319
4320 #define MAX_WAIT_CNT 1000
4321
4322 /* To stop a block, clear the enable bit and poll till it
4323  * clears.  tp->lock is held.
4324  */
4325 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4326 {
4327         unsigned int i;
4328         u32 val;
4329
4330         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4331                 switch (ofs) {
4332                 case RCVLSC_MODE:
4333                 case DMAC_MODE:
4334                 case MBFREE_MODE:
4335                 case BUFMGR_MODE:
4336                 case MEMARB_MODE:
4337                         /* We can't enable/disable these bits of the
4338                          * 5705/5750, just say success.
4339                          */
4340                         return 0;
4341
4342                 default:
4343                         break;
4344                 };
4345         }
4346
4347         val = tr32(ofs);
4348         val &= ~enable_bit;
4349         tw32_f(ofs, val);
4350
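             /* Poll for the enable bit to clear: up to MAX_WAIT_CNT
              * iterations of 100us each, roughly 100ms in total.
              */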
4351         for (i = 0; i < MAX_WAIT_CNT; i++) {
4352                 udelay(100);
4353                 val = tr32(ofs);
4354                 if ((val & enable_bit) == 0)
4355                         break;
4356         }
4357
4358         if (i == MAX_WAIT_CNT && !silent) {
4359                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4360                        "ofs=%lx enable_bit=%x\n",
4361                        ofs, enable_bit);
4362                 return -ENODEV;
4363         }
4364
4365         return 0;
4366 }
4367
4368 /* tp->lock is held. */
4369 static int tg3_abort_hw(struct tg3 *tp, int silent)
4370 {
4371         int i, err;
4372
4373         tg3_disable_ints(tp);
4374
4375         tp->rx_mode &= ~RX_MODE_ENABLE;
4376         tw32_f(MAC_RX_MODE, tp->rx_mode);
4377         udelay(10);
4378
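             /* Stop each receive/send/DMA block in turn; errors are OR'd
              * together so every block still gets a stop attempt even if
              * an earlier one timed out.
              */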
4379         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4380         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4381         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4382         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4383         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4384         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4385
4386         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4387         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4388         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4389         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4390         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4391         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4392         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4393
4394         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4395         tw32_f(MAC_MODE, tp->mac_mode);
4396         udelay(40);
4397
4398         tp->tx_mode &= ~TX_MODE_ENABLE;
4399         tw32_f(MAC_TX_MODE, tp->tx_mode);
4400
4401         for (i = 0; i < MAX_WAIT_CNT; i++) {
4402                 udelay(100);
4403                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4404                         break;
4405         }
4406         if (i >= MAX_WAIT_CNT) {
4407                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4408                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4409                        tp->dev->name, tr32(MAC_TX_MODE));
4410                 err |= -ENODEV;
4411         }
4412
4413         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4414         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4415         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4416
4417         tw32(FTQ_RESET, 0xffffffff);
4418         tw32(FTQ_RESET, 0x00000000);
4419
4420         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4421         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4422
4423         if (tp->hw_status)
4424                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4425         if (tp->hw_stats)
4426                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4427
4428         return err;
4429 }
4430
4431 /* tp->lock is held. */
4432 static int tg3_nvram_lock(struct tg3 *tp)
4433 {
4434         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4435                 int i;
4436
4437                 if (tp->nvram_lock_cnt == 0) {
4438                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4439                         for (i = 0; i < 8000; i++) {
4440                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4441                                         break;
4442                                 udelay(20);
4443                         }
4444                         if (i == 8000) {
4445                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4446                                 return -ENODEV;
4447                         }
4448                 }
4449                 tp->nvram_lock_cnt++;
4450         }
4451         return 0;
4452 }
4453
4454 /* tp->lock is held. */
4455 static void tg3_nvram_unlock(struct tg3 *tp)
4456 {
4457         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4458                 if (tp->nvram_lock_cnt > 0)
4459                         tp->nvram_lock_cnt--;
4460                 if (tp->nvram_lock_cnt == 0)
4461                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4462         }
4463 }
4464
4465 /* tp->lock is held. */
4466 static void tg3_enable_nvram_access(struct tg3 *tp)
4467 {
4468         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4469             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4470                 u32 nvaccess = tr32(NVRAM_ACCESS);
4471
4472                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4473         }
4474 }
4475
4476 /* tp->lock is held. */
4477 static void tg3_disable_nvram_access(struct tg3 *tp)
4478 {
4479         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4480             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4481                 u32 nvaccess = tr32(NVRAM_ACCESS);
4482
4483                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4484         }
4485 }
4486
4487 /* tp->lock is held. */
4488 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4489 {
4490         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4491                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4492
4493         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4494                 switch (kind) {
4495                 case RESET_KIND_INIT:
4496                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4497                                       DRV_STATE_START);
4498                         break;
4499
4500                 case RESET_KIND_SHUTDOWN:
4501                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4502                                       DRV_STATE_UNLOAD);
4503                         break;
4504
4505                 case RESET_KIND_SUSPEND:
4506                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4507                                       DRV_STATE_SUSPEND);
4508                         break;
4509
4510                 default:
4511                         break;
4512                 };
4513         }
4514 }
4515
4516 /* tp->lock is held. */
4517 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4518 {
4519         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4520                 switch (kind) {
4521                 case RESET_KIND_INIT:
4522                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4523                                       DRV_STATE_START_DONE);
4524                         break;
4525
4526                 case RESET_KIND_SHUTDOWN:
4527                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4528                                       DRV_STATE_UNLOAD_DONE);
4529                         break;
4530
4531                 default:
4532                         break;
4533                 };
4534         }
4535 }
4536
4537 /* tp->lock is held. */
4538 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4539 {
4540         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4541                 switch (kind) {
4542                 case RESET_KIND_INIT:
4543                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4544                                       DRV_STATE_START);
4545                         break;
4546
4547                 case RESET_KIND_SHUTDOWN:
4548                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4549                                       DRV_STATE_UNLOAD);
4550                         break;
4551
4552                 case RESET_KIND_SUSPEND:
4553                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4554                                       DRV_STATE_SUSPEND);
4555                         break;
4556
4557                 default:
4558                         break;
4559                 };
4560         }
4561 }
4562
4563 static void tg3_stop_fw(struct tg3 *);
4564
4565 /* tp->lock is held. */
4566 static int tg3_chip_reset(struct tg3 *tp)
4567 {
4568         u32 val;
4569         void (*write_op)(struct tg3 *, u32, u32);
4570         int i;
4571
4572         tg3_nvram_lock(tp);
4573
4574         /* No matching tg3_nvram_unlock() after this because
4575          * chip reset below will undo the nvram lock.
4576          */
4577         tp->nvram_lock_cnt = 0;
4578
4579         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4580             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4581             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4582                 tw32(GRC_FASTBOOT_PC, 0);
4583
4584         /*
4585          * We must avoid the readl() that normally takes place.
4586          * It locks machines, causes machine checks, and other
4587          * fun things.  So, temporarily disable the 5701
4588          * hardware workaround, while we do the reset.
4589          */
4590         write_op = tp->write32;
4591         if (write_op == tg3_write_flush_reg32)
4592                 tp->write32 = tg3_write32;
4593
4594         /* do the reset */
4595         val = GRC_MISC_CFG_CORECLK_RESET;
4596
4597         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4598                 if (tr32(0x7e2c) == 0x60) {
4599                         tw32(0x7e2c, 0x20);
4600                 }
4601                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4602                         tw32(GRC_MISC_CFG, (1 << 29));
4603                         val |= (1 << 29);
4604                 }
4605         }
4606
4607         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4608                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4609         tw32(GRC_MISC_CFG, val);
4610
4611         /* restore 5701 hardware bug workaround write method */
4612         tp->write32 = write_op;
4613
4614         /* Unfortunately, we have to delay before the PCI read back.
4615          * Some 575X chips even will not respond to a PCI cfg access
4616          * when the reset command is given to the chip.
4617          *
4618          * How do these hardware designers expect things to work
4619          * properly if the PCI write is posted for a long period
4620          * of time?  It is always necessary to have some method by
4621          * which a register read back can occur to push the write
4622          * out which does the reset.
4623          *
4624          * For most tg3 variants the trick below was working.
4625          * Ho hum...
4626          */
4627         udelay(120);
4628
4629         /* Flush PCI posted writes.  The normal MMIO registers
4630          * are inaccessible at this time so this is the only
4631          * way to do this reliably (actually, this is no longer
4632          * the case, see above).  I tried to use indirect
4633          * register read/write but this upset some 5701 variants.
4634          */
4635         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4636
4637         udelay(120);
4638
4639         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4640                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4641                         int i;
4642                         u32 cfg_val;
4643
4644                         /* Wait for link training to complete.  */
4645                         for (i = 0; i < 5000; i++)
4646                                 udelay(100);
4647
4648                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4649                         pci_write_config_dword(tp->pdev, 0xc4,
4650                                                cfg_val | (1 << 15));
4651                 }
4652                 /* Set PCIE max payload size and clear error status.  */
4653                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4654         }
4655
4656         /* Re-enable indirect register accesses. */
4657         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4658                                tp->misc_host_ctrl);
4659
4660         /* Set MAX PCI retry to zero. */
4661         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4662         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4663             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4664                 val |= PCISTATE_RETRY_SAME_DMA;
4665         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4666
4667         pci_restore_state(tp->pdev);
4668
4669         /* Make sure PCI-X relaxed ordering bit is clear. */
4670         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4671         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4672         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4673
4674         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4675                 u32 val;
4676
4677                 /* Chip reset on 5780 will reset MSI enable bit,
4678                  * so need to restore it.
4679                  */
4680                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4681                         u16 ctrl;
4682
4683                         pci_read_config_word(tp->pdev,
4684                                              tp->msi_cap + PCI_MSI_FLAGS,
4685                                              &ctrl);
4686                         pci_write_config_word(tp->pdev,
4687                                               tp->msi_cap + PCI_MSI_FLAGS,
4688                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4689                         val = tr32(MSGINT_MODE);
4690                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4691                 }
4692
4693                 val = tr32(MEMARB_MODE);
4694                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4695
4696         } else
4697                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4698
4699         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4700                 tg3_stop_fw(tp);
4701                 tw32(0x5000, 0x400);
4702         }
4703
4704         tw32(GRC_MODE, tp->grc_mode);
4705
4706         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4707                 u32 val = tr32(0xc4);
4708
4709                 tw32(0xc4, val | (1 << 15));
4710         }
4711
4712         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4713             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4714                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4715                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4716                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4717                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4718         }
4719
4720         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4721                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4722                 tw32_f(MAC_MODE, tp->mac_mode);
4723         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4724                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4725                 tw32_f(MAC_MODE, tp->mac_mode);
4726         } else
4727                 tw32_f(MAC_MODE, 0);
4728         udelay(40);
4729
4730         /* Wait for firmware initialization to complete. */
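             /* tg3_write_sig_pre_reset() posted MAGIC1 to the firmware
              * mailbox; the bootcode signals completion by writing back its
              * ones-complement.  Poll for up to 100000 * 10us, about one
              * second.
              */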
4731         for (i = 0; i < 100000; i++) {
4732                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4733                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4734                         break;
4735                 udelay(10);
4736         }
4737
4738         /* Chip might not be fitted with firmware.  Some Sun onboard
4739          * parts are configured like that.  So don't signal the timeout
4740          * of the above loop as an error, but do report the lack of
4741          * running firmware once.
4742          */
4743         if (i >= 100000 &&
4744             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4745                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4746
4747                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4748                        tp->dev->name);
4749         }
4750
4751         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4752             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4753                 u32 val = tr32(0x7c00);
4754
4755                 tw32(0x7c00, val | (1 << 25));
4756         }
4757
4758         /* Reprobe ASF enable state.  */
4759         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4760         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4761         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4762         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4763                 u32 nic_cfg;
4764
4765                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4766                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4767                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4768                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4769                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4770                 }
4771         }
4772
4773         return 0;
4774 }
4775
4776 /* tp->lock is held. */
4777 static void tg3_stop_fw(struct tg3 *tp)
4778 {
4779         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4780                 u32 val;
4781                 int i;
4782
4783                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4784                 val = tr32(GRC_RX_CPU_EVENT);
4785                 val |= (1 << 14);
4786                 tw32(GRC_RX_CPU_EVENT, val);
4787
4788                 /* Wait for RX cpu to ACK the event.  */
4789                 for (i = 0; i < 100; i++) {
4790                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4791                                 break;
4792                         udelay(1);
4793                 }
4794         }
4795 }
4796
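     /* Full shutdown sequence: pause any ASF firmware, post the pre-reset
      * signature, quiesce the hardware blocks, reset the chip, then post
      * the legacy and post-reset signatures.
      */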
4797 /* tp->lock is held. */
4798 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4799 {
4800         int err;
4801
4802         tg3_stop_fw(tp);
4803
4804         tg3_write_sig_pre_reset(tp, kind);
4805
4806         tg3_abort_hw(tp, silent);
4807         err = tg3_chip_reset(tp);
4808
4809         tg3_write_sig_legacy(tp, kind);
4810         tg3_write_sig_post_reset(tp, kind);
4811
4812         if (err)
4813                 return err;
4814
4815         return 0;
4816 }
4817
4818 #define TG3_FW_RELEASE_MAJOR    0x0
4819 #define TG3_FW_RELASE_MINOR     0x0
4820 #define TG3_FW_RELEASE_FIX      0x0
4821 #define TG3_FW_START_ADDR       0x08000000
4822 #define TG3_FW_TEXT_ADDR        0x08000000
4823 #define TG3_FW_TEXT_LEN         0x9c0
4824 #define TG3_FW_RODATA_ADDR      0x080009c0
4825 #define TG3_FW_RODATA_LEN       0x60
4826 #define TG3_FW_DATA_ADDR        0x08000a40
4827 #define TG3_FW_DATA_LEN         0x20
4828 #define TG3_FW_SBSS_ADDR        0x08000a60
4829 #define TG3_FW_SBSS_LEN         0xc
4830 #define TG3_FW_BSS_ADDR         0x08000a70
4831 #define TG3_FW_BSS_LEN          0x10
4832
4833 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4834         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4835         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4836         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4837         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4838         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4839         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4840         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4841         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4842         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4843         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4844         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4845         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4846         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4847         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4848         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4849         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4850         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4851         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4852         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4853         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4854         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4855         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4856         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4857         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4858         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4859         0, 0, 0, 0, 0, 0,
4860         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4861         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4862         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4863         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4864         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4865         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4866         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4867         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4868         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4869         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4870         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4871         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4872         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4873         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4874         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4875         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4876         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4877         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4878         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4879         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4880         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4881         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4882         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4883         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4884         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4885         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4886         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4887         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4888         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4889         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4890         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4891         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4892         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4893         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4894         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4895         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4896         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4897         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4898         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4899         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4900         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4901         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4902         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4903         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4904         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4905         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4906         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4907         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4908         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4909         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4910         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4911         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4912         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4913         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4914         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4915         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4916         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4917         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4918         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4919         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4920         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4921         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4922         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4923         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4924         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4925 };
4926
4927 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4928         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4929         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4930         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4931         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4932         0x00000000
4933 };
4934
4935 #if 0 /* All zeros, don't eat up space with it. */
4936 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4937         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4938         0x00000000, 0x00000000, 0x00000000, 0x00000000
4939 };
4940 #endif
4941
4942 #define RX_CPU_SCRATCH_BASE     0x30000
4943 #define RX_CPU_SCRATCH_SIZE     0x04000
4944 #define TX_CPU_SCRATCH_BASE     0x34000
4945 #define TX_CPU_SCRATCH_SIZE     0x04000
4946
4947 /* tp->lock is held. */
4948 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4949 {
4950         int i;
4951
4952         BUG_ON(offset == TX_CPU_BASE &&
4953             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
4954
4955         if (offset == RX_CPU_BASE) {
4956                 for (i = 0; i < 10000; i++) {
4957                         tw32(offset + CPU_STATE, 0xffffffff);
4958                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4959                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4960                                 break;
4961                 }
4962
4963                 tw32(offset + CPU_STATE, 0xffffffff);
4964                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4965                 udelay(10);
4966         } else {
4967                 for (i = 0; i < 10000; i++) {
4968                         tw32(offset + CPU_STATE, 0xffffffff);
4969                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4970                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4971                                 break;
4972                 }
4973         }
4974
4975         if (i >= 10000) {
4976                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4977                        "and %s CPU\n",
4978                        tp->dev->name,
4979                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4980                 return -ENODEV;
4981         }
4982
4983         /* Clear firmware's nvram arbitration. */
4984         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4985                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4986         return 0;
4987 }
4988
4989 struct fw_info {
4990         unsigned int text_base;
4991         unsigned int text_len;
4992         u32 *text_data;
4993         unsigned int rodata_base;
4994         unsigned int rodata_len;
4995         u32 *rodata_data;
4996         unsigned int data_base;
4997         unsigned int data_len;
4998         u32 *data_data;
4999 };
5000
5001 /* tp->lock is held. */
5002 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5003                                  int cpu_scratch_size, struct fw_info *info)
5004 {
5005         int err, lock_err, i;
5006         void (*write_op)(struct tg3 *, u32, u32);
5007
5008         if (cpu_base == TX_CPU_BASE &&
5009             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5010                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5011                        "TX cpu firmware on %s which is 5705.\n",
5012                        tp->dev->name);
5013                 return -EINVAL;
5014         }
5015
5016         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5017                 write_op = tg3_write_mem;
5018         else
5019                 write_op = tg3_write_indirect_reg32;
5020
5021         /* It is possible that bootcode is still loading at this point.
5022          * Get the nvram lock first before halting the cpu.
5023          * Get the nvram lock before halting the cpu.
5024         lock_err = tg3_nvram_lock(tp);
5025         err = tg3_halt_cpu(tp, cpu_base);
5026         if (!lock_err)
5027                 tg3_nvram_unlock(tp);
5028         if (err)
5029                 goto out;
5030
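             /* Zero the whole CPU scratch area, keep the CPU halted, then
              * copy each firmware section (text, rodata, data) to its
              * offset within scratch; a NULL data pointer means the section
              * is all zeros.
              */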
5031         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5032                 write_op(tp, cpu_scratch_base + i, 0);
5033         tw32(cpu_base + CPU_STATE, 0xffffffff);
5034         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5035         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5036                 write_op(tp, (cpu_scratch_base +
5037                               (info->text_base & 0xffff) +
5038                               (i * sizeof(u32))),
5039                          (info->text_data ?
5040                           info->text_data[i] : 0));
5041         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5042                 write_op(tp, (cpu_scratch_base +
5043                               (info->rodata_base & 0xffff) +
5044                               (i * sizeof(u32))),
5045                          (info->rodata_data ?
5046                           info->rodata_data[i] : 0));
5047         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5048                 write_op(tp, (cpu_scratch_base +
5049                               (info->data_base & 0xffff) +
5050                               (i * sizeof(u32))),
5051                          (info->data_data ?
5052                           info->data_data[i] : 0));
5053
5054         err = 0;
5055
5056 out:
5057         return err;
5058 }
5059
5060 /* tp->lock is held. */
5061 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5062 {
5063         struct fw_info info;
5064         int err, i;
5065
5066         info.text_base = TG3_FW_TEXT_ADDR;
5067         info.text_len = TG3_FW_TEXT_LEN;
5068         info.text_data = &tg3FwText[0];
5069         info.rodata_base = TG3_FW_RODATA_ADDR;
5070         info.rodata_len = TG3_FW_RODATA_LEN;
5071         info.rodata_data = &tg3FwRodata[0];
5072         info.data_base = TG3_FW_DATA_ADDR;
5073         info.data_len = TG3_FW_DATA_LEN;
5074         info.data_data = NULL;
5075
5076         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5077                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5078                                     &info);
5079         if (err)
5080                 return err;
5081
5082         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5083                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5084                                     &info);
5085         if (err)
5086                 return err;
5087
5088         /* Now startup only the RX cpu. */
5089         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5090         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5091
5092         for (i = 0; i < 5; i++) {
5093                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5094                         break;
5095                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5096                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5097                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5098                 udelay(1000);
5099         }
5100         if (i >= 5) {
5101                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5102                        "to set RX CPU PC, is %08x should be %08x\n",
5103                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5104                        TG3_FW_TEXT_ADDR);
5105                 return -ENODEV;
5106         }
5107         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5108         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5109
5110         return 0;
5111 }
5112
5113 #if TG3_TSO_SUPPORT != 0
5114
5115 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5116 #define TG3_TSO_FW_RELASE_MINOR         0x6
5117 #define TG3_TSO_FW_RELEASE_FIX          0x0
5118 #define TG3_TSO_FW_START_ADDR           0x08000000
5119 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5120 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5121 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5122 #define TG3_TSO_FW_RODATA_LEN           0x60
5123 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5124 #define TG3_TSO_FW_DATA_LEN             0x30
5125 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5126 #define TG3_TSO_FW_SBSS_LEN             0x2c
5127 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5128 #define TG3_TSO_FW_BSS_LEN              0x894
5129
5130 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5131         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5132         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5133         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5134         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5135         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5136         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5137         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5138         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5139         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5140         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5141         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5142         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5143         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5144         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5145         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5146         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5147         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5148         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5149         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5150         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5151         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5152         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5153         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5154         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5155         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5156         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5157         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5158         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5159         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5160         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5161         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5162         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5163         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5164         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5165         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5166         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5167         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5168         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5169         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5170         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5171         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5172         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5173         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5174         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5175         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5176         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5177         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5178         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5179         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5180         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5181         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5182         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5183         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5184         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5185         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5186         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5187         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5188         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5189         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5190         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5191         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5192         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5193         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5194         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5195         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5196         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5197         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5198         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5199         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5200         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5201         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5202         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5203         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5204         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5205         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5206         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5207         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5208         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5209         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5210         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5211         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5212         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5213         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5214         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5215         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5216         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5217         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5218         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5219         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5220         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5221         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5222         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5223         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5224         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5225         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5226         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5227         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5228         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5229         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5230         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5231         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5232         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5233         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5234         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5235         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5236         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5237         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5238         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5239         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5240         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5241         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5242         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5243         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5244         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5245         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5246         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5247         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5248         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5249         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5250         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5251         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5252         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5253         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5254         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5255         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5256         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5257         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5258         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5259         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5260         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5261         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5262         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5263         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5264         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5265         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5266         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5267         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5268         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5269         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5270         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5271         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5272         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5273         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5274         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5275         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5276         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5277         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5278         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5279         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5280         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5281         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5282         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5283         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5284         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5285         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5286         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5287         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5288         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5289         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5290         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5291         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5292         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5293         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5294         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5295         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5296         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5297         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5298         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5299         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5300         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5301         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5302         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5303         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5304         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5305         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5306         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5307         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5308         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5309         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5310         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5311         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5312         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5313         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5314         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5315         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5316         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5317         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5318         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5319         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5320         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5321         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5322         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5323         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5324         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5325         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5326         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5327         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5328         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5329         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5330         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5331         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5332         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5333         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5334         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5335         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5336         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5337         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5338         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5339         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5340         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5341         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5342         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5343         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5344         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5345         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5346         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5347         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5348         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5349         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5350         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5351         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5352         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5353         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5354         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5355         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5356         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5357         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5358         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5359         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5360         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5361         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5362         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5363         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5364         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5365         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5366         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5367         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5368         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5369         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5370         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5371         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5372         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5373         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5374         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5375         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5376         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5377         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5378         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5379         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5380         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5381         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5382         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5383         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5384         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5385         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5386         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5387         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5388         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5389         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5390         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5391         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5392         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5393         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5394         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5395         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5396         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5397         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5398         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5399         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5400         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5401         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5402         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5403         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5404         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5405         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5406         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5407         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5408         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5409         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5410         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5411         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5412         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5413         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5414         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5415 };
5416
5417 static u32 tg3TsoFwRodata[] = {
5418         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5419         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5420         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5421         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5422         0x00000000,
5423 };
5424
5425 static u32 tg3TsoFwData[] = {
5426         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5427         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5428         0x00000000,
5429 };
5430
5431 /* 5705 needs a special version of the TSO firmware.  */
5432 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5433 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5434 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5435 #define TG3_TSO5_FW_START_ADDR          0x00010000
5436 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5437 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5438 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5439 #define TG3_TSO5_FW_RODATA_LEN          0x50
5440 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5441 #define TG3_TSO5_FW_DATA_LEN            0x20
5442 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5443 #define TG3_TSO5_FW_SBSS_LEN            0x28
5444 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5445 #define TG3_TSO5_FW_BSS_LEN             0x88
5446
5447 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5448         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5449         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5450         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5451         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5452         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5453         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5454         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5455         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5456         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5457         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5458         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5459         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5460         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5461         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5462         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5463         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5464         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5465         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5466         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5467         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5468         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5469         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5470         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5471         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5472         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5473         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5474         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5475         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5476         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5477         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5478         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5479         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5480         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5481         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5482         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5483         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5484         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5485         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5486         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5487         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5488         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5489         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5490         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5491         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5492         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5493         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5494         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5495         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5496         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5497         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5498         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5499         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5500         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5501         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5502         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5503         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5504         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5505         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5506         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5507         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5508         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5509         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5510         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5511         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5512         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5513         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5514         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5515         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5516         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5517         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5518         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5519         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5520         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5521         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5522         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5523         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5524         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5525         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5526         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5527         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5528         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5529         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5530         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5531         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5532         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5533         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5534         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5535         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5536         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5537         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5538         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5539         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5540         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5541         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5542         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5543         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5544         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5545         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5546         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5547         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5548         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5549         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5550         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5551         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5552         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5553         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5554         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5555         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5556         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5557         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5558         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5559         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5560         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5561         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5562         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5563         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5564         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5565         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5566         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5567         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5568         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5569         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5570         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5571         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5572         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5573         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5574         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5575         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5576         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5577         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5578         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5579         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5580         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5581         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5582         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5583         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5584         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5585         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5586         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5587         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5588         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5589         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5590         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5591         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5592         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5593         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5594         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5595         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5596         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5597         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5598         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5599         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5600         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5601         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5602         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5603         0x00000000, 0x00000000, 0x00000000,
5604 };
5605
5606 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5607         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5608         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5609         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5610         0x00000000, 0x00000000, 0x00000000,
5611 };
5612
5613 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5614         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5615         0x00000000, 0x00000000, 0x00000000,
5616 };
5617
5618 /* tp->lock is held. */
5619 static int tg3_load_tso_firmware(struct tg3 *tp)
5620 {
5621         struct fw_info info;
5622         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5623         int err, i;
5624
5625         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5626                 return 0;
5627
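        /* Chips with TSO in hardware need no firmware at all (handled by
         * the early return above).  The 5705 gets the special TSO5 image
         * loaded into its RX CPU, with scratch space carved out of the
         * MBUF pool SRAM; every other TSO-capable chip gets the standard
         * image loaded into the TX CPU scratch area.
         */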
5628         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5629                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5630                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5631                 info.text_data = &tg3Tso5FwText[0];
5632                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5633                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5634                 info.rodata_data = &tg3Tso5FwRodata[0];
5635                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5636                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5637                 info.data_data = &tg3Tso5FwData[0];
5638                 cpu_base = RX_CPU_BASE;
5639                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5640                 cpu_scratch_size = (info.text_len +
5641                                     info.rodata_len +
5642                                     info.data_len +
5643                                     TG3_TSO5_FW_SBSS_LEN +
5644                                     TG3_TSO5_FW_BSS_LEN);
5645         } else {
5646                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5647                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5648                 info.text_data = &tg3TsoFwText[0];
5649                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5650                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5651                 info.rodata_data = &tg3TsoFwRodata[0];
5652                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5653                 info.data_len = TG3_TSO_FW_DATA_LEN;
5654                 info.data_data = &tg3TsoFwData[0];
5655                 cpu_base = TX_CPU_BASE;
5656                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5657                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5658         }
5659
5660         err = tg3_load_firmware_cpu(tp, cpu_base,
5661                                     cpu_scratch_base, cpu_scratch_size,
5662                                     &info);
5663         if (err)
5664                 return err;
5665
5666         /* Now start up the CPU. */
5667         tw32(cpu_base + CPU_STATE, 0xffffffff);
5668         tw32_f(cpu_base + CPU_PC,    info.text_base);
5669
5670         for (i = 0; i < 5; i++) {
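        /* Verify that the new PC actually latched; if not, halt the CPU,
         * rewrite the PC and retry a few times before giving up.
         */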
5671                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5672                         break;
5673                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5674                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5675                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5676                 udelay(1000);
5677         }
5678         if (i >= 5) {
5679                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5680                        "CPU PC for %s: is %08x, should be %08x\n",
5681                        tp->dev->name, tr32(cpu_base + CPU_PC),
5682                        info.text_base);
5683                 return -ENODEV;
5684         }
5685         tw32(cpu_base + CPU_STATE, 0xffffffff);
5686         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5687         return 0;
5688 }
5689
5690 #endif /* TG3_TSO_SUPPORT != 0 */
5691
5692 /* tp->lock is held. */
5693 static void __tg3_set_mac_addr(struct tg3 *tp)
5694 {
5695         u32 addr_high, addr_low;
5696         int i;
5697
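        /* The hardware splits the station address into a 16-bit high half
         * (bytes 0-1) and a 32-bit low half (bytes 2-5); the same address
         * is replicated into all four MAC address slots.
         */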
5698         addr_high = ((tp->dev->dev_addr[0] << 8) |
5699                      tp->dev->dev_addr[1]);
5700         addr_low = ((tp->dev->dev_addr[2] << 24) |
5701                     (tp->dev->dev_addr[3] << 16) |
5702                     (tp->dev->dev_addr[4] <<  8) |
5703                     (tp->dev->dev_addr[5] <<  0));
5704         for (i = 0; i < 4; i++) {
5705                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5706                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5707         }
5708
5709         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5710             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5711                 for (i = 0; i < 12; i++) {
5712                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5713                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5714                 }
5715         }
5716
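        /* Derive the TX backoff seed from the sum of all six address
         * bytes, masked to TX_BACKOFF_SEED_MASK, presumably so that NICs
         * with different addresses pick different backoff slots.
         */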
5717         addr_high = (tp->dev->dev_addr[0] +
5718                      tp->dev->dev_addr[1] +
5719                      tp->dev->dev_addr[2] +
5720                      tp->dev->dev_addr[3] +
5721                      tp->dev->dev_addr[4] +
5722                      tp->dev->dev_addr[5]) &
5723                 TX_BACKOFF_SEED_MASK;
5724         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5725 }
5726
5727 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5728 {
5729         struct tg3 *tp = netdev_priv(dev);
5730         struct sockaddr *addr = p;
5731
5732         if (!is_valid_ether_addr(addr->sa_data))
5733                 return -EINVAL;
5734
5735         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5736
5737         if (!netif_running(dev))
5738                 return 0;
5739
5740         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5741                 /* Reset chip so that ASF can re-init any MAC addresses it
5742                  * needs.
5743                  */
5744                 tg3_netif_stop(tp);
5745                 tg3_full_lock(tp, 1);
5746
5747                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5748                 tg3_init_hw(tp, 0);
5749
5750                 tg3_netif_start(tp);
5751                 tg3_full_unlock(tp);
5752         } else {
5753                 spin_lock_bh(&tp->lock);
5754                 __tg3_set_mac_addr(tp);
5755                 spin_unlock_bh(&tp->lock);
5756         }
5757
5758         return 0;
5759 }
5760
5761 /* tp->lock is held. */
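/* Write one TG3_BDINFO control block into NIC memory: the 64-bit host DMA
 * address of the ring, its maxlen/flags word and, on pre-5705 chips only,
 * the ring's location in NIC SRAM.
 */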
5762 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5763                            dma_addr_t mapping, u32 maxlen_flags,
5764                            u32 nic_addr)
5765 {
5766         tg3_write_mem(tp,
5767                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5768                       ((u64) mapping >> 32));
5769         tg3_write_mem(tp,
5770                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5771                       ((u64) mapping & 0xffffffff));
5772         tg3_write_mem(tp,
5773                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5774                        maxlen_flags);
5775
5776         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5777                 tg3_write_mem(tp,
5778                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5779                               nic_addr);
5780 }
5781
5782 static void __tg3_set_rx_mode(struct net_device *);
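/* Program the host coalescing engine from an ethtool_coalesce request.
 * The per-interrupt tick registers and the statistics block ticks are only
 * programmed on pre-5705 chips, and statistics coalescing is forced to
 * zero while the link is down.
 */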
5783 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5784 {
5785         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5786         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5787         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5788         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5789         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5790                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5791                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5792         }
5793         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5794         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5795         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5796                 u32 val = ec->stats_block_coalesce_usecs;
5797
5798                 if (!netif_carrier_ok(tp->dev))
5799                         val = 0;
5800
5801                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5802         }
5803 }
5804
5805 /* tp->lock is held. */
5806 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5807 {
5808         u32 val, rdmac_mode;
5809         int i, err, limit;
5810
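        /* Quiesce the chip first: mask interrupts, stop the on-chip
         * firmware and post the pre-reset signature before the actual
         * chip reset below.
         */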
5811         tg3_disable_ints(tp);
5812
5813         tg3_stop_fw(tp);
5814
5815         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5816
5817         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5818                 tg3_abort_hw(tp, 1);
5819         }
5820
5821         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5822                 tg3_phy_reset(tp);
5823
5824         err = tg3_chip_reset(tp);
5825         if (err)
5826                 return err;
5827
5828         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5829
5830         /* This works around an issue with Athlon chipsets on
5831          * B3 tigon3 silicon.  This bit has no effect on any
5832          * other revision.  But do not set this on PCI Express
5833          * chips.
5834          */
5835         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5836                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5837         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5838
5839         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5840             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5841                 val = tr32(TG3PCI_PCISTATE);
5842                 val |= PCISTATE_RETRY_SAME_DMA;
5843                 tw32(TG3PCI_PCISTATE, val);
5844         }
5845
5846         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5847                 /* Enable some hw fixes.  */
5848                 val = tr32(TG3PCI_MSI_DATA);
5849                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5850                 tw32(TG3PCI_MSI_DATA, val);
5851         }
5852
5853         /* Descriptor ring init may access the NIC SRAM area
5854          * to set up the TX descriptors, so we can only do
5855          * this after the hardware has been successfully
5856          * reset.
5857          */
5858         tg3_init_rings(tp);
5859
5860         /* This value is determined during the probe time DMA
5861          * engine test, tg3_test_dma.
5862          */
5863         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5864
5865         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5866                           GRC_MODE_4X_NIC_SEND_RINGS |
5867                           GRC_MODE_NO_TX_PHDR_CSUM |
5868                           GRC_MODE_NO_RX_PHDR_CSUM);
5869         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5870
5871         /* Pseudo-header checksum is done by hardware logic and not
5872          * the offload processors, so make the chip do the pseudo-
5873          * header checksums on receive.  For transmit it is more
5874          * convenient to do the pseudo-header checksum in software
5875          * as Linux does that on transmit for us in all cases.
5876          */
5877         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5878
5879         tw32(GRC_MODE,
5880              tp->grc_mode |
5881              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5882
5883         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
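        /* A prescaler value of 65 is presumably meant to divide the 66 MHz
         * clock down to a 1 MHz (1 usec) timer tick.
         */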
5884         val = tr32(GRC_MISC_CFG);
5885         val &= ~0xff;
5886         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5887         tw32(GRC_MISC_CFG, val);
5888
5889         /* Initialize MBUF/DESC pool. */
5890         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5891                 /* Do nothing.  */
5892         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5893                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5894                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5895                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5896                 else
5897                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5898                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5899                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5900         }
5901 #if TG3_TSO_SUPPORT != 0
5902         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5903                 int fw_len;
5904
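                /* The TSO5 firmware image sits at the bottom of the 5705
                 * MBUF pool SRAM, so round its total footprint up to a
                 * 128-byte boundary and move/shrink the pool past it.
                 */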
5905                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5906                           TG3_TSO5_FW_RODATA_LEN +
5907                           TG3_TSO5_FW_DATA_LEN +
5908                           TG3_TSO5_FW_SBSS_LEN +
5909                           TG3_TSO5_FW_BSS_LEN);
5910                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5911                 tw32(BUFMGR_MB_POOL_ADDR,
5912                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5913                 tw32(BUFMGR_MB_POOL_SIZE,
5914                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5915         }
5916 #endif
5917
5918         if (tp->dev->mtu <= ETH_DATA_LEN) {
5919                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5920                      tp->bufmgr_config.mbuf_read_dma_low_water);
5921                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5922                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5923                 tw32(BUFMGR_MB_HIGH_WATER,
5924                      tp->bufmgr_config.mbuf_high_water);
5925         } else {
5926                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5927                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5928                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5929                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5930                 tw32(BUFMGR_MB_HIGH_WATER,
5931                      tp->bufmgr_config.mbuf_high_water_jumbo);
5932         }
5933         tw32(BUFMGR_DMA_LOW_WATER,
5934              tp->bufmgr_config.dma_low_water);
5935         tw32(BUFMGR_DMA_HIGH_WATER,
5936              tp->bufmgr_config.dma_high_water);
5937
5938         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5939         for (i = 0; i < 2000; i++) {
5940                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5941                         break;
5942                 udelay(10);
5943         }
5944         if (i >= 2000) {
5945                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5946                        tp->dev->name);
5947                 return -ENODEV;
5948         }
5949
5950         /* Setup replenish threshold. */
5951         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5952
5953         /* Initialize TG3_BDINFO's at:
5954          *  RCVDBDI_STD_BD:     standard eth size rx ring
5955          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5956          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5957          *
5958          * like so:
5959          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5960          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5961          *                              ring attribute flags
5962          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5963          *
5964          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5965          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5966          *
5967          * The size of each ring is fixed in the firmware, but the location is
5968          * configurable.
5969          */
5970         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5971              ((u64) tp->rx_std_mapping >> 32));
5972         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5973              ((u64) tp->rx_std_mapping & 0xffffffff));
5974         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5975              NIC_SRAM_RX_BUFFER_DESC);
5976
5977         /* Don't even try to program the JUMBO/MINI buffer descriptor
5978          * configs on 5705.
5979          */
5980         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5981                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5982                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5983         } else {
5984                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5985                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5986
5987                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5988                      BDINFO_FLAGS_DISABLED);
5989
5990                 /* Setup replenish threshold. */
5991                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5992
5993                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5994                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5995                              ((u64) tp->rx_jumbo_mapping >> 32));
5996                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5997                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5998                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5999                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6000                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6001                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6002                 } else {
6003                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6004                              BDINFO_FLAGS_DISABLED);
6005                 }
6006
6007         }
6008
6009         /* There is only one send ring on 5705/5750, no need to explicitly
6010          * disable the others.
6011          */
6012         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6013                 /* Clear out send RCB ring in SRAM. */
6014                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6015                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6016                                       BDINFO_FLAGS_DISABLED);
6017         }
6018
6019         tp->tx_prod = 0;
6020         tp->tx_cons = 0;
6021         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6022         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6023
6024         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6025                        tp->tx_desc_mapping,
6026                        (TG3_TX_RING_SIZE <<
6027                         BDINFO_FLAGS_MAXLEN_SHIFT),
6028                        NIC_SRAM_TX_BUFFER_DESC);
6029
6030         /* There is only one receive return ring on 5705/5750, no need
6031          * to explicitly disable the others.
6032          */
6033         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6034                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6035                      i += TG3_BDINFO_SIZE) {
6036                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6037                                       BDINFO_FLAGS_DISABLED);
6038                 }
6039         }
6040
6041         tp->rx_rcb_ptr = 0;
6042         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6043
6044         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6045                        tp->rx_rcb_mapping,
6046                        (TG3_RX_RCB_RING_SIZE(tp) <<
6047                         BDINFO_FLAGS_MAXLEN_SHIFT),
6048                        0);
6049
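        /* Publish the RX producer indices for the buffers that
         * tg3_init_rings posted above: rx_pending entries on the standard
         * ring and, when enabled, rx_jumbo_pending on the jumbo ring.
         */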
6050         tp->rx_std_ptr = tp->rx_pending;
6051         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6052                      tp->rx_std_ptr);
6053
6054         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6055                                                 tp->rx_jumbo_pending : 0;
6056         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6057                      tp->rx_jumbo_ptr);
6058
6059         /* Initialize MAC address and backoff seed. */
6060         __tg3_set_mac_addr(tp);
6061
6062         /* MTU + ethernet header + FCS + optional VLAN tag */
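        /* e.g. with ETH_HLEN = 14, a standard 1500 byte MTU programs
         * 1500 + 14 + 4 + 4 = 1522 into MAC_RX_MTU_SIZE.
         */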
6063         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6064
6065         /* The slot time is changed by tg3_setup_phy if we
6066          * run at gigabit with half duplex.
6067          */
6068         tw32(MAC_TX_LENGTHS,
6069              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6070              (6 << TX_LENGTHS_IPG_SHIFT) |
6071              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6072
6073         /* Receive rules. */
6074         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6075         tw32(RCVLPC_CONFIG, 0x0181);
6076
6077         /* Calculate RDMAC_MODE setting early, we need it to determine
6078          * the RCVLPC_STATE_ENABLE mask.
6079          */
6080         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6081                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6082                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6083                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6084                       RDMAC_MODE_LNGREAD_ENAB);
6085         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6086                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6087
6088         /* If statement applies to 5705 and 5750 PCI devices only */
6089         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6090              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6091             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6092                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6093                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6094                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6095                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6096                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6097                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6098                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6099                 }
6100         }
6101
6102         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6103                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6104
6105 #if TG3_TSO_SUPPORT != 0
6106         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6107                 rdmac_mode |= (1 << 27);
6108 #endif
6109
6110         /* Receive/send statistics. */
6111         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6112             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6113                 val = tr32(RCVLPC_STATS_ENABLE);
6114                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6115                 tw32(RCVLPC_STATS_ENABLE, val);
6116         } else {
6117                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6118         }
6119         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6120         tw32(SNDDATAI_STATSENAB, 0xffffff);
6121         tw32(SNDDATAI_STATSCTRL,
6122              (SNDDATAI_SCTRL_ENABLE |
6123               SNDDATAI_SCTRL_FASTUPD));
6124
6125         /* Setup host coalescing engine. */
6126         tw32(HOSTCC_MODE, 0);
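        /* Writing zero disables the engine; poll until the enable bit
         * really clears before reprogramming the coalescing parameters.
         */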
6127         for (i = 0; i < 2000; i++) {
6128                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6129                         break;
6130                 udelay(10);
6131         }
6132
6133         __tg3_set_coalesce(tp, &tp->coal);
6134
6135         /* set status block DMA address */
6136         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6137              ((u64) tp->status_mapping >> 32));
6138         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6139              ((u64) tp->status_mapping & 0xffffffff));
6140
6141         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6142                 /* Status/statistics block address.  See tg3_timer,
6143                  * the tg3_periodic_fetch_stats call there, and
6144                  * tg3_get_stats to see how this works for 5705/5750 chips.
6145                  */
6146                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6147                      ((u64) tp->stats_mapping >> 32));
6148                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6149                      ((u64) tp->stats_mapping & 0xffffffff));
6150                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6151                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6152         }
6153
6154         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6155
6156         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6157         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6158         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6159                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6160
6161         /* Clear statistics/status block in chip, and status block in ram. */
6162         for (i = NIC_SRAM_STATS_BLK;
6163              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6164              i += sizeof(u32)) {
6165                 tg3_write_mem(tp, i, 0);
6166                 udelay(40);
6167         }
6168         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6169
6170         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6171                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6172                 /* reset to prevent losing 1st rx packet intermittently */
6173                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6174                 udelay(10);
6175         }
6176
6177         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6178                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6179         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6180         udelay(40);
6181
6182         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6183          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6184          * register to preserve the GPIO settings for LOMs. The GPIOs,
6185          * whether used as inputs or outputs, are set by boot code after
6186          * reset.
6187          */
6188         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6189                 u32 gpio_mask;
6190
6191                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6192                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6193
6194                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6195                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6196                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6197
6198                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6199                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6200
6201                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6202
6203                 /* GPIO1 must be driven high for eeprom write protect */
6204                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6205                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6206         }
6207         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6208         udelay(100);
6209
6210         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6211         tp->last_tag = 0;
6212
6213         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6214                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6215                 udelay(40);
6216         }
6217
6218         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6219                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6220                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6221                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6222                WDMAC_MODE_LNGREAD_ENAB);
6223
6224         /* If statement applies to 5705 and 5750 PCI devices only */
6225         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6226              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6227             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6228                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6229                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6230                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6231                         /* nothing */
6232                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6233                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6234                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6235                         val |= WDMAC_MODE_RX_ACCEL;
6236                 }
6237         }
6238
6239         /* Enable host coalescing bug fix */
6240         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6241             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6242                 val |= (1 << 29);
6243
6244         tw32_f(WDMAC_MODE, val);
6245         udelay(40);
6246
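        /* In PCI-X mode, reprogram the burst and split-transaction limits:
         * the 5703 gets a fixed maximum burst size, while the 5704 also has
         * its split-transaction limit set (from tp->split_mode_max_reqs
         * when split mode is enabled).
         */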
6247         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6248                 val = tr32(TG3PCI_X_CAPS);
6249                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6250                         val &= ~PCIX_CAPS_BURST_MASK;
6251                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6252                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6253                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6254                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6255                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6256                                 val |= (tp->split_mode_max_reqs <<
6257                                         PCIX_CAPS_SPLIT_SHIFT);
6258                 }
6259                 tw32(TG3PCI_X_CAPS, val);
6260         }
6261
6262         tw32_f(RDMAC_MODE, rdmac_mode);
6263         udelay(40);
6264
6265         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6266         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6267                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6268         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6269         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6270         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6271         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6272         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6273 #if TG3_TSO_SUPPORT != 0
6274         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6275                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6276 #endif
6277         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6278         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6279
6280         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6281                 err = tg3_load_5701_a0_firmware_fix(tp);
6282                 if (err)
6283                         return err;
6284         }
6285
6286 #if TG3_TSO_SUPPORT != 0
6287         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6288                 err = tg3_load_tso_firmware(tp);
6289                 if (err)
6290                         return err;
6291         }
6292 #endif
6293
6294         tp->tx_mode = TX_MODE_ENABLE;
6295         tw32_f(MAC_TX_MODE, tp->tx_mode);
6296         udelay(100);
6297
6298         tp->rx_mode = RX_MODE_ENABLE;
6299         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6300                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6301
6302         tw32_f(MAC_RX_MODE, tp->rx_mode);
6303         udelay(10);
6304
6305         if (tp->link_config.phy_is_low_power) {
6306                 tp->link_config.phy_is_low_power = 0;
6307                 tp->link_config.speed = tp->link_config.orig_speed;
6308                 tp->link_config.duplex = tp->link_config.orig_duplex;
6309                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6310         }
6311
6312         tp->mi_mode = MAC_MI_MODE_BASE;
6313         tw32_f(MAC_MI_MODE, tp->mi_mode);
6314         udelay(80);
6315
6316         tw32(MAC_LED_CTRL, tp->led_ctrl);
6317
6318         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6319         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6320                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6321                 udelay(10);
6322         }
6323         tw32_f(MAC_RX_MODE, tp->rx_mode);
6324         udelay(10);
6325
6326         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6327                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6328                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6329                         /* Set drive transmission level to 1.2V, but only
6330                          * if the signal pre-emphasis bit is not set.  */
6331                         val = tr32(MAC_SERDES_CFG);
6332                         val &= 0xfffff000;
6333                         val |= 0x880;
6334                         tw32(MAC_SERDES_CFG, val);
6335                 }
6336                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6337                         tw32(MAC_SERDES_CFG, 0x616000);
6338         }
6339
6340         /* Prevent chip from dropping frames when flow control
6341          * is enabled.
6342          */
6343         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6344
6345         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6346             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6347                 /* Use hardware link auto-negotiation */
6348                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6349         }
6350
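        /* On 5714 MII/SerDes devices, enable the SerDes RX signal detector
         * and have GRC local control use it instead of the external
         * signal-detect pin.
         */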
6351         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6352             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6353                 u32 tmp;
6354
6355                 tmp = tr32(SERDES_RX_CTRL);
6356                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6357                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6358                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6359                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6360         }
6361
6362         err = tg3_setup_phy(tp, reset_phy);
6363         if (err)
6364                 return err;
6365
6366         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6367                 u32 tmp;
6368
6369                 /* Clear CRC stats. */
6370                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6371                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6372                         tg3_readphy(tp, 0x14, &tmp);
6373                 }
6374         }
6375
6376         __tg3_set_rx_mode(tp->dev);
6377
6378         /* Initialize receive rules. */
6379         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6380         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6381         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6382         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6383
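        /* 5705+ chips outside the 5780 class expose 8 receive rule slots,
         * older chips 16; when ASF is enabled the top four slots are left
         * untouched (presumably in use by the firmware).  Zero out every
         * rule/value pair above the limit -- the switch cases below fall
         * through intentionally.
         */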
6384         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6385             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6386                 limit = 8;
6387         else
6388                 limit = 16;
6389         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6390                 limit -= 4;
6391         switch (limit) {
6392         case 16:
6393                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6394         case 15:
6395                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6396         case 14:
6397                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6398         case 13:
6399                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6400         case 12:
6401                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6402         case 11:
6403                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6404         case 10:
6405                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6406         case 9:
6407                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6408         case 8:
6409                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6410         case 7:
6411                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6412         case 6:
6413                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6414         case 5:
6415                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6416         case 4:
6417                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6418         case 3:
6419                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6420         case 2:
6421         case 1:
6422
6423         default:
6424                 break;
6425         }
6426
6427         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6428
6429         return 0;
6430 }
6431
6432 /* Called at device open time to get the chip ready for
6433  * packet processing.  Invoked with tp->lock held.
6434  */
6435 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6436 {
6437         int err;
6438
6439         /* Force the chip into D0. */
6440         err = tg3_set_power_state(tp, PCI_D0);
6441         if (err)
6442                 goto out;
6443
6444         tg3_switch_clocks(tp);
6445
6446         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6447
6448         err = tg3_reset_hw(tp, reset_phy);
6449
6450 out:
6451         return err;
6452 }
6453
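/* Fold a 32-bit hardware counter into a 64-bit software counter.  If the
 * low word wrapped during the add, the post-add value is smaller than the
 * value just added, so carry into the high word.
 */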
6454 #define TG3_STAT_ADD32(PSTAT, REG) \
6455 do {    u32 __val = tr32(REG); \
6456         (PSTAT)->low += __val; \
6457         if ((PSTAT)->low < __val) \
6458                 (PSTAT)->high += 1; \
6459 } while (0)
6460
6461 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6462 {
6463         struct tg3_hw_stats *sp = tp->hw_stats;
6464
6465         if (!netif_carrier_ok(tp->dev))
6466                 return;
6467
6468         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6469         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6470         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6471         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6472         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6473         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6474         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6475         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6476         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6477         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6478         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6479         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6480         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6481
6482         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6483         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6484         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6485         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6486         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6487         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6488         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6489         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6490         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6491         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6492         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6493         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6494         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6495         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6496
6497         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6498         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6499         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
6500 }
6501
6502 static void tg3_timer(unsigned long __opaque)
6503 {
6504         struct tg3 *tp = (struct tg3 *) __opaque;
6505
6506         if (tp->irq_sync)
6507                 goto restart_timer;
6508
6509         spin_lock(&tp->lock);
6510
6511         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6512                 /* All of this garbage is needed because, when using
6513                  * non-tagged IRQ status, the mailbox/status_block protocol
6514                  * the chip uses with the CPU is race prone.
6515                  */
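                /* If the status block already shows an update, re-assert the
                 * interrupt line; otherwise force a host coalescing "now"
                 * event so the chip refreshes the status block.
                 */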
6516                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6517                         tw32(GRC_LOCAL_CTRL,
6518                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6519                 } else {
6520                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6521                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6522                 }
6523
6524                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6525                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6526                         spin_unlock(&tp->lock);
6527                         schedule_work(&tp->reset_task);
6528                         return;
6529                 }
6530         }
6531
6532         /* This part only runs once per second. */
6533         if (!--tp->timer_counter) {
6534                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6535                         tg3_periodic_fetch_stats(tp);
6536
6537                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6538                         u32 mac_stat;
6539                         int phy_event;
6540
6541                         mac_stat = tr32(MAC_STATUS);
6542
6543                         phy_event = 0;
6544                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6545                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6546                                         phy_event = 1;
6547                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6548                                 phy_event = 1;
6549
6550                         if (phy_event)
6551                                 tg3_setup_phy(tp, 0);
6552                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6553                         u32 mac_stat = tr32(MAC_STATUS);
6554                         int need_setup = 0;
6555
6556                         if (netif_carrier_ok(tp->dev) &&
6557                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6558                                 need_setup = 1;
6559                         }
6560                         if (!netif_carrier_ok(tp->dev) &&
6561                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6562                                          MAC_STATUS_SIGNAL_DET))) {
6563                                 need_setup = 1;
6564                         }
6565                         if (need_setup) {
6566                                 tw32_f(MAC_MODE,
6567                                      (tp->mac_mode &
6568                                       ~MAC_MODE_PORT_MODE_MASK));
6569                                 udelay(40);
6570                                 tw32_f(MAC_MODE, tp->mac_mode);
6571                                 udelay(40);
6572                                 tg3_setup_phy(tp, 0);
6573                         }
6574                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6575                         tg3_serdes_parallel_detect(tp);
6576
6577                 tp->timer_counter = tp->timer_multiplier;
6578         }
6579
6580         /* Heartbeat is only sent once every 2 seconds.  */
6581         if (!--tp->asf_counter) {
6582                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6583                         u32 val;
6584
6585                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6586                                       FWCMD_NICDRV_ALIVE2);
6587                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6588                         /* 5 second timeout */
6589                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
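                        /* Setting bit 14 of GRC_RX_CPU_EVENT appears to be
                         * what tells the ASF firmware to process the command
                         * written to the mailbox above.
                         */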
6590                         val = tr32(GRC_RX_CPU_EVENT);
6591                         val |= (1 << 14);
6592                         tw32(GRC_RX_CPU_EVENT, val);
6593                 }
6594                 tp->asf_counter = tp->asf_multiplier;
6595         }
6596
6597         spin_unlock(&tp->lock);
6598
6599 restart_timer:
6600         tp->timer.expires = jiffies + tp->timer_offset;
6601         add_timer(&tp->timer);
6602 }
6603
6604 static int tg3_request_irq(struct tg3 *tp)
6605 {
6606         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6607         unsigned long flags;
6608         struct net_device *dev = tp->dev;
6609
6610         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6611                 fn = tg3_msi;
6612                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6613                         fn = tg3_msi_1shot;
6614                 flags = SA_SAMPLE_RANDOM;
6615         } else {
6616                 fn = tg3_interrupt;
6617                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6618                         fn = tg3_interrupt_tagged;
6619                 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6620         }
6621         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6622 }
6623
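/* Fire a test interrupt through the host coalescing engine and poll the
 * interrupt mailbox for up to ~50 ms to verify that the interrupt actually
 * arrived.
 */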
6624 static int tg3_test_interrupt(struct tg3 *tp)
6625 {
6626         struct net_device *dev = tp->dev;
6627         int err, i;
6628         u32 int_mbox = 0;
6629
6630         if (!netif_running(dev))
6631                 return -ENODEV;
6632
6633         tg3_disable_ints(tp);
6634
6635         free_irq(tp->pdev->irq, dev);
6636
6637         err = request_irq(tp->pdev->irq, tg3_test_isr,
6638                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6639         if (err)
6640                 return err;
6641
6642         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6643         tg3_enable_ints(tp);
6644
6645         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6646                HOSTCC_MODE_NOW);
6647
6648         for (i = 0; i < 5; i++) {
6649                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6650                                         TG3_64BIT_REG_LOW);
6651                 if (int_mbox != 0)
6652                         break;
6653                 msleep(10);
6654         }
6655
6656         tg3_disable_ints(tp);
6657
6658         free_irq(tp->pdev->irq, dev);
6659         
6660         err = tg3_request_irq(tp);
6661
6662         if (err)
6663                 return err;
6664
6665         if (int_mbox != 0)
6666                 return 0;
6667
6668         return -EIO;
6669 }
6670
6671 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6672  * INTx mode is successfully restored.
6673  */
6674 static int tg3_test_msi(struct tg3 *tp)
6675 {
6676         struct net_device *dev = tp->dev;
6677         int err;
6678         u16 pci_cmd;
6679
6680         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6681                 return 0;
6682
6683         /* Turn off SERR reporting in case MSI terminates with Master
6684          * Abort.
6685          */
6686         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6687         pci_write_config_word(tp->pdev, PCI_COMMAND,
6688                               pci_cmd & ~PCI_COMMAND_SERR);
6689
6690         err = tg3_test_interrupt(tp);
6691
6692         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6693
6694         if (!err)
6695                 return 0;
6696
6697         /* other failures */
6698         if (err != -EIO)
6699                 return err;
6700
6701         /* MSI test failed, go back to INTx mode */
6702         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6703                "switching to INTx mode. Please report this failure to "
6704                "the PCI maintainer and include system chipset information.\n",
6705                        tp->dev->name);
6706
6707         free_irq(tp->pdev->irq, dev);
6708         pci_disable_msi(tp->pdev);
6709
6710         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6711
6712         err = tg3_request_irq(tp);
6713         if (err)
6714                 return err;
6715
6716         /* Need to reset the chip because the MSI cycle may have terminated
6717          * with Master Abort.
6718          */
6719         tg3_full_lock(tp, 1);
6720
6721         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6722         err = tg3_init_hw(tp, 1);
6723
6724         tg3_full_unlock(tp);
6725
6726         if (err)
6727                 free_irq(tp->pdev->irq, dev);
6728
6729         return err;
6730 }
6731
6732 static int tg3_open(struct net_device *dev)
6733 {
6734         struct tg3 *tp = netdev_priv(dev);
6735         int err;
6736
6737         tg3_full_lock(tp, 0);
6738
6739         err = tg3_set_power_state(tp, PCI_D0);
6740         if (err)
6741                 return err;
6742
6743         tg3_disable_ints(tp);
6744         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6745
6746         tg3_full_unlock(tp);
6747
6748         /* The placement of this call is tied
6749          * to the setup and use of Host TX descriptors.
6750          */
6751         err = tg3_alloc_consistent(tp);
6752         if (err)
6753                 return err;
6754
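        /* MSI is only attempted on 5750-class and newer chips, excluding the
         * 5750 AX/BX revisions and single-port 5714 devices.
         */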
6755         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6756             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6757             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6758             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6759               (tp->pdev_peer == tp->pdev))) {
6760                 /* All MSI-supporting chips should support tagged
6761                  * status.  Warn and skip MSI if that is not the case.
6762                  */
6763                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6764                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6765                                "Not using MSI.\n", tp->dev->name);
6766                 } else if (pci_enable_msi(tp->pdev) == 0) {
6767                         u32 msi_mode;
6768
6769                         msi_mode = tr32(MSGINT_MODE);
6770                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6771                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6772                 }
6773         }
6774         err = tg3_request_irq(tp);
6775
6776         if (err) {
6777                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6778                         pci_disable_msi(tp->pdev);
6779                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6780                 }
6781                 tg3_free_consistent(tp);
6782                 return err;
6783         }
6784
6785         tg3_full_lock(tp, 0);
6786
6787         err = tg3_init_hw(tp, 1);
6788         if (err) {
6789                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6790                 tg3_free_rings(tp);
6791         } else {
6792                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6793                         tp->timer_offset = HZ;
6794                 else
6795                         tp->timer_offset = HZ / 10;
6796
6797                 BUG_ON(tp->timer_offset > HZ);
6798                 tp->timer_counter = tp->timer_multiplier =
6799                         (HZ / tp->timer_offset);
6800                 tp->asf_counter = tp->asf_multiplier =
6801                         ((HZ / tp->timer_offset) * 2);
6802
6803                 init_timer(&tp->timer);
6804                 tp->timer.expires = jiffies + tp->timer_offset;
6805                 tp->timer.data = (unsigned long) tp;
6806                 tp->timer.function = tg3_timer;
6807         }
6808
6809         tg3_full_unlock(tp);
6810
6811         if (err) {
6812                 free_irq(tp->pdev->irq, dev);
6813                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6814                         pci_disable_msi(tp->pdev);
6815                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6816                 }
6817                 tg3_free_consistent(tp);
6818                 return err;
6819         }
6820
6821         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6822                 err = tg3_test_msi(tp);
6823
6824                 if (err) {
6825                         tg3_full_lock(tp, 0);
6826
6827                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6828                                 pci_disable_msi(tp->pdev);
6829                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6830                         }
6831                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6832                         tg3_free_rings(tp);
6833                         tg3_free_consistent(tp);
6834
6835                         tg3_full_unlock(tp);
6836
6837                         return err;
6838                 }
6839
6840                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6841                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
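                                /* Bit 29 of the register at offset 0x7c04
                                 * appears to enable one-shot MSI behaviour
                                 * on these chips.
                                 */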
6842                                 u32 val = tr32(0x7c04);
6843
6844                                 tw32(0x7c04, val | (1 << 29));
6845                         }
6846                 }
6847         }
6848
6849         tg3_full_lock(tp, 0);
6850
6851         add_timer(&tp->timer);
6852         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6853         tg3_enable_ints(tp);
6854
6855         tg3_full_unlock(tp);
6856
6857         netif_start_queue(dev);
6858
6859         return 0;
6860 }
6861
6862 #if 0
6863 /*static*/ void tg3_dump_state(struct tg3 *tp)
6864 {
6865         u32 val32, val32_2, val32_3, val32_4, val32_5;
6866         u16 val16;
6867         int i;
6868
6869         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6870         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6871         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6872                val16, val32);
6873
6874         /* MAC block */
6875         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6876                tr32(MAC_MODE), tr32(MAC_STATUS));
6877         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6878                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6879         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6880                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6881         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6882                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6883
6884         /* Send data initiator control block */
6885         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6886                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6887         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6888                tr32(SNDDATAI_STATSCTRL));
6889
6890         /* Send data completion control block */
6891         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6892
6893         /* Send BD ring selector block */
6894         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6895                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6896
6897         /* Send BD initiator control block */
6898         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6899                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6900
6901         /* Send BD completion control block */
6902         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6903
6904         /* Receive list placement control block */
6905         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6906                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6907         printk("       RCVLPC_STATSCTRL[%08x]\n",
6908                tr32(RCVLPC_STATSCTRL));
6909
6910         /* Receive data and receive BD initiator control block */
6911         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6912                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6913
6914         /* Receive data completion control block */
6915         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6916                tr32(RCVDCC_MODE));
6917
6918         /* Receive BD initiator control block */
6919         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6920                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6921
6922         /* Receive BD completion control block */
6923         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6924                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6925
6926         /* Receive list selector control block */
6927         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6928                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6929
6930         /* Mbuf cluster free block */
6931         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6932                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6933
6934         /* Host coalescing control block */
6935         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6936                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6937         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6938                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6939                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6940         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6941                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6942                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6943         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6944                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6945         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6946                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6947
6948         /* Memory arbiter control block */
6949         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6950                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6951
6952         /* Buffer manager control block */
6953         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6954                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6955         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6956                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6957         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6958                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6959                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6960                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6961
6962         /* Read DMA control block */
6963         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6964                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6965
6966         /* Write DMA control block */
6967         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6968                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6969
6970         /* DMA completion block */
6971         printk("DEBUG: DMAC_MODE[%08x]\n",
6972                tr32(DMAC_MODE));
6973
6974         /* GRC block */
6975         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6976                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6977         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6978                tr32(GRC_LOCAL_CTRL));
6979
6980         /* TG3_BDINFOs */
6981         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6982                tr32(RCVDBDI_JUMBO_BD + 0x0),
6983                tr32(RCVDBDI_JUMBO_BD + 0x4),
6984                tr32(RCVDBDI_JUMBO_BD + 0x8),
6985                tr32(RCVDBDI_JUMBO_BD + 0xc));
6986         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6987                tr32(RCVDBDI_STD_BD + 0x0),
6988                tr32(RCVDBDI_STD_BD + 0x4),
6989                tr32(RCVDBDI_STD_BD + 0x8),
6990                tr32(RCVDBDI_STD_BD + 0xc));
6991         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6992                tr32(RCVDBDI_MINI_BD + 0x0),
6993                tr32(RCVDBDI_MINI_BD + 0x4),
6994                tr32(RCVDBDI_MINI_BD + 0x8),
6995                tr32(RCVDBDI_MINI_BD + 0xc));
6996
6997         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6998         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6999         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7000         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7001         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7002                val32, val32_2, val32_3, val32_4);
7003
7004         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7005         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7006         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7007         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7008         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7009                val32, val32_2, val32_3, val32_4);
7010
7011         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7012         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7013         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7014         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7015         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7016         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7017                val32, val32_2, val32_3, val32_4, val32_5);
7018
7019         /* SW status block */
7020         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7021                tp->hw_status->status,
7022                tp->hw_status->status_tag,
7023                tp->hw_status->rx_jumbo_consumer,
7024                tp->hw_status->rx_consumer,
7025                tp->hw_status->rx_mini_consumer,
7026                tp->hw_status->idx[0].rx_producer,
7027                tp->hw_status->idx[0].tx_consumer);
7028
7029         /* SW statistics block */
7030         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7031                ((u32 *)tp->hw_stats)[0],
7032                ((u32 *)tp->hw_stats)[1],
7033                ((u32 *)tp->hw_stats)[2],
7034                ((u32 *)tp->hw_stats)[3]);
7035
7036         /* Mailboxes */
7037         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7038                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7039                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7040                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7041                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7042
7043         /* NIC side send descriptors. */
7044         for (i = 0; i < 6; i++) {
7045                 unsigned long txd;
7046
7047                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7048                         + (i * sizeof(struct tg3_tx_buffer_desc));
7049                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7050                        i,
7051                        readl(txd + 0x0), readl(txd + 0x4),
7052                        readl(txd + 0x8), readl(txd + 0xc));
7053         }
7054
7055         /* NIC side RX descriptors. */
7056         for (i = 0; i < 6; i++) {
7057                 unsigned long rxd;
7058
7059                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7060                         + (i * sizeof(struct tg3_rx_buffer_desc));
7061                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7062                        i,
7063                        readl(rxd + 0x0), readl(rxd + 0x4),
7064                        readl(rxd + 0x8), readl(rxd + 0xc));
7065                 rxd += (4 * sizeof(u32));
7066                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7067                        i,
7068                        readl(rxd + 0x0), readl(rxd + 0x4),
7069                        readl(rxd + 0x8), readl(rxd + 0xc));
7070         }
7071
7072         for (i = 0; i < 6; i++) {
7073                 unsigned long rxd;
7074
7075                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7076                         + (i * sizeof(struct tg3_rx_buffer_desc));
7077                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7078                        i,
7079                        readl(rxd + 0x0), readl(rxd + 0x4),
7080                        readl(rxd + 0x8), readl(rxd + 0xc));
7081                 rxd += (4 * sizeof(u32));
7082                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7083                        i,
7084                        readl(rxd + 0x0), readl(rxd + 0x4),
7085                        readl(rxd + 0x8), readl(rxd + 0xc));
7086         }
7087 }
7088 #endif
7089
7090 static struct net_device_stats *tg3_get_stats(struct net_device *);
7091 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7092
7093 static int tg3_close(struct net_device *dev)
7094 {
7095         struct tg3 *tp = netdev_priv(dev);
7096
7097         /* Calling flush_scheduled_work() may deadlock because
7098          * linkwatch_event() may be on the workqueue, and it will try to
7099          * take the rtnl_lock, which we are holding.
7100          */
7101         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7102                 msleep(1);
7103
7104         netif_stop_queue(dev);
7105
7106         del_timer_sync(&tp->timer);
7107
7108         tg3_full_lock(tp, 1);
7109 #if 0
7110         tg3_dump_state(tp);
7111 #endif
7112
7113         tg3_disable_ints(tp);
7114
7115         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7116         tg3_free_rings(tp);
7117         tp->tg3_flags &=
7118                 ~(TG3_FLAG_INIT_COMPLETE |
7119                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7120
7121         tg3_full_unlock(tp);
7122
7123         free_irq(tp->pdev->irq, dev);
7124         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7125                 pci_disable_msi(tp->pdev);
7126                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7127         }
7128
7129         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7130                sizeof(tp->net_stats_prev));
7131         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7132                sizeof(tp->estats_prev));
7133
7134         tg3_free_consistent(tp);
7135
7136         tg3_set_power_state(tp, PCI_D3hot);
7137
7138         netif_carrier_off(tp->dev);
7139
7140         return 0;
7141 }
7142
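/* On 32-bit hosts an unsigned long can only carry the low 32 bits of the
 * 64-bit hardware counter; the high word is dropped.
 */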
7143 static inline unsigned long get_stat64(tg3_stat64_t *val)
7144 {
7145         unsigned long ret;
7146
7147 #if (BITS_PER_LONG == 32)
7148         ret = val->low;
7149 #else
7150         ret = ((u64)val->high << 32) | ((u64)val->low);
7151 #endif
7152         return ret;
7153 }
7154
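/* 5700/5701 copper devices keep their CRC error count in the PHY (read via
 * registers 0x1e/0x14, the same sequence used to clear it at init time);
 * all other devices report it through the MAC rx_fcs_errors counter.
 */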
7155 static unsigned long calc_crc_errors(struct tg3 *tp)
7156 {
7157         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7158
7159         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7160             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7161              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7162                 u32 val;
7163
7164                 spin_lock_bh(&tp->lock);
7165                 if (!tg3_readphy(tp, 0x1e, &val)) {
7166                         tg3_writephy(tp, 0x1e, val | 0x8000);
7167                         tg3_readphy(tp, 0x14, &val);
7168                 } else
7169                         val = 0;
7170                 spin_unlock_bh(&tp->lock);
7171
7172                 tp->phy_crc_errors += val;
7173
7174                 return tp->phy_crc_errors;
7175         }
7176
7177         return get_stat64(&hw_stats->rx_fcs_errors);
7178 }
7179
7180 #define ESTAT_ADD(member) \
7181         estats->member =        old_estats->member + \
7182                                 get_stat64(&hw_stats->member)
7183
7184 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7185 {
7186         struct tg3_ethtool_stats *estats = &tp->estats;
7187         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7188         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7189
7190         if (!hw_stats)
7191                 return old_estats;
7192
7193         ESTAT_ADD(rx_octets);
7194         ESTAT_ADD(rx_fragments);
7195         ESTAT_ADD(rx_ucast_packets);
7196         ESTAT_ADD(rx_mcast_packets);
7197         ESTAT_ADD(rx_bcast_packets);
7198         ESTAT_ADD(rx_fcs_errors);
7199         ESTAT_ADD(rx_align_errors);
7200         ESTAT_ADD(rx_xon_pause_rcvd);
7201         ESTAT_ADD(rx_xoff_pause_rcvd);
7202         ESTAT_ADD(rx_mac_ctrl_rcvd);
7203         ESTAT_ADD(rx_xoff_entered);
7204         ESTAT_ADD(rx_frame_too_long_errors);
7205         ESTAT_ADD(rx_jabbers);
7206         ESTAT_ADD(rx_undersize_packets);
7207         ESTAT_ADD(rx_in_length_errors);
7208         ESTAT_ADD(rx_out_length_errors);
7209         ESTAT_ADD(rx_64_or_less_octet_packets);
7210         ESTAT_ADD(rx_65_to_127_octet_packets);
7211         ESTAT_ADD(rx_128_to_255_octet_packets);
7212         ESTAT_ADD(rx_256_to_511_octet_packets);
7213         ESTAT_ADD(rx_512_to_1023_octet_packets);
7214         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7215         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7216         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7217         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7218         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7219
7220         ESTAT_ADD(tx_octets);
7221         ESTAT_ADD(tx_collisions);
7222         ESTAT_ADD(tx_xon_sent);
7223         ESTAT_ADD(tx_xoff_sent);
7224         ESTAT_ADD(tx_flow_control);
7225         ESTAT_ADD(tx_mac_errors);
7226         ESTAT_ADD(tx_single_collisions);
7227         ESTAT_ADD(tx_mult_collisions);
7228         ESTAT_ADD(tx_deferred);
7229         ESTAT_ADD(tx_excessive_collisions);
7230         ESTAT_ADD(tx_late_collisions);
7231         ESTAT_ADD(tx_collide_2times);
7232         ESTAT_ADD(tx_collide_3times);
7233         ESTAT_ADD(tx_collide_4times);
7234         ESTAT_ADD(tx_collide_5times);
7235         ESTAT_ADD(tx_collide_6times);
7236         ESTAT_ADD(tx_collide_7times);
7237         ESTAT_ADD(tx_collide_8times);
7238         ESTAT_ADD(tx_collide_9times);
7239         ESTAT_ADD(tx_collide_10times);
7240         ESTAT_ADD(tx_collide_11times);
7241         ESTAT_ADD(tx_collide_12times);
7242         ESTAT_ADD(tx_collide_13times);
7243         ESTAT_ADD(tx_collide_14times);
7244         ESTAT_ADD(tx_collide_15times);
7245         ESTAT_ADD(tx_ucast_packets);
7246         ESTAT_ADD(tx_mcast_packets);
7247         ESTAT_ADD(tx_bcast_packets);
7248         ESTAT_ADD(tx_carrier_sense_errors);
7249         ESTAT_ADD(tx_discards);
7250         ESTAT_ADD(tx_errors);
7251
7252         ESTAT_ADD(dma_writeq_full);
7253         ESTAT_ADD(dma_write_prioq_full);
7254         ESTAT_ADD(rxbds_empty);
7255         ESTAT_ADD(rx_discards);
7256         ESTAT_ADD(rx_errors);
7257         ESTAT_ADD(rx_threshold_hit);
7258
7259         ESTAT_ADD(dma_readq_full);
7260         ESTAT_ADD(dma_read_prioq_full);
7261         ESTAT_ADD(tx_comp_queue_full);
7262
7263         ESTAT_ADD(ring_set_send_prod_index);
7264         ESTAT_ADD(ring_status_update);
7265         ESTAT_ADD(nic_irqs);
7266         ESTAT_ADD(nic_avoided_irqs);
7267         ESTAT_ADD(nic_tx_threshold_hit);
7268
7269         return estats;
7270 }
7271
7272 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7273 {
7274         struct tg3 *tp = netdev_priv(dev);
7275         struct net_device_stats *stats = &tp->net_stats;
7276         struct net_device_stats *old_stats = &tp->net_stats_prev;
7277         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7278
7279         if (!hw_stats)
7280                 return old_stats;
7281
7282         stats->rx_packets = old_stats->rx_packets +
7283                 get_stat64(&hw_stats->rx_ucast_packets) +
7284                 get_stat64(&hw_stats->rx_mcast_packets) +
7285                 get_stat64(&hw_stats->rx_bcast_packets);
7286                 
7287         stats->tx_packets = old_stats->tx_packets +
7288                 get_stat64(&hw_stats->tx_ucast_packets) +
7289                 get_stat64(&hw_stats->tx_mcast_packets) +
7290                 get_stat64(&hw_stats->tx_bcast_packets);
7291
7292         stats->rx_bytes = old_stats->rx_bytes +
7293                 get_stat64(&hw_stats->rx_octets);
7294         stats->tx_bytes = old_stats->tx_bytes +
7295                 get_stat64(&hw_stats->tx_octets);
7296
7297         stats->rx_errors = old_stats->rx_errors +
7298                 get_stat64(&hw_stats->rx_errors);
7299         stats->tx_errors = old_stats->tx_errors +
7300                 get_stat64(&hw_stats->tx_errors) +
7301                 get_stat64(&hw_stats->tx_mac_errors) +
7302                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7303                 get_stat64(&hw_stats->tx_discards);
7304
7305         stats->multicast = old_stats->multicast +
7306                 get_stat64(&hw_stats->rx_mcast_packets);
7307         stats->collisions = old_stats->collisions +
7308                 get_stat64(&hw_stats->tx_collisions);
7309
7310         stats->rx_length_errors = old_stats->rx_length_errors +
7311                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7312                 get_stat64(&hw_stats->rx_undersize_packets);
7313
7314         stats->rx_over_errors = old_stats->rx_over_errors +
7315                 get_stat64(&hw_stats->rxbds_empty);
7316         stats->rx_frame_errors = old_stats->rx_frame_errors +
7317                 get_stat64(&hw_stats->rx_align_errors);
7318         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7319                 get_stat64(&hw_stats->tx_discards);
7320         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7321                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7322
7323         stats->rx_crc_errors = old_stats->rx_crc_errors +
7324                 calc_crc_errors(tp);
7325
7326         stats->rx_missed_errors = old_stats->rx_missed_errors +
7327                 get_stat64(&hw_stats->rx_discards);
7328
7329         return stats;
7330 }
7331
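/* Bit-serial CRC-32 (reflected polynomial 0xedb88320) over the buffer; used
 * below to hash multicast addresses into the MAC hash filter registers.
 */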
7332 static inline u32 calc_crc(unsigned char *buf, int len)
7333 {
7334         u32 reg;
7335         u32 tmp;
7336         int j, k;
7337
7338         reg = 0xffffffff;
7339
7340         for (j = 0; j < len; j++) {
7341                 reg ^= buf[j];
7342
7343                 for (k = 0; k < 8; k++) {
7344                         tmp = reg & 0x01;
7345
7346                         reg >>= 1;
7347
7348                         if (tmp) {
7349                                 reg ^= 0xedb88320;
7350                         }
7351                 }
7352         }
7353
7354         return ~reg;
7355 }
7356
7357 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7358 {
7359         /* accept or reject all multicast frames */
7360         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7361         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7362         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7363         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7364 }
7365
7366 static void __tg3_set_rx_mode(struct net_device *dev)
7367 {
7368         struct tg3 *tp = netdev_priv(dev);
7369         u32 rx_mode;
7370
7371         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7372                                   RX_MODE_KEEP_VLAN_TAG);
7373
7374         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7375          * flag clear.
7376          */
7377 #if TG3_VLAN_TAG_USED
7378         if (!tp->vlgrp &&
7379             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7380                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7381 #else
7382         /* By definition, VLAN is always disabled in this
7383          * case.
7384          */
7385         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7386                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7387 #endif
7388
7389         if (dev->flags & IFF_PROMISC) {
7390                 /* Promiscuous mode. */
7391                 rx_mode |= RX_MODE_PROMISC;
7392         } else if (dev->flags & IFF_ALLMULTI) {
7393                 /* Accept all multicast. */
7394                 tg3_set_multi (tp, 1);
7395         } else if (dev->mc_count < 1) {
7396                 /* Reject all multicast. */
7397                 tg3_set_multi (tp, 0);
7398         } else {
7399                 /* Accept one or more multicast(s). */
7400                 struct dev_mc_list *mclist;
7401                 unsigned int i;
7402                 u32 mc_filter[4] = { 0, };
7403                 u32 regidx;
7404                 u32 bit;
7405                 u32 crc;
7406
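                /* The low 7 bits of the inverted CRC select one of 128 hash
                 * filter bits: bits 6:5 pick the register, bits 4:0 the bit
                 * within it.
                 */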
7407                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7408                      i++, mclist = mclist->next) {
7409
7410                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7411                         bit = ~crc & 0x7f;
7412                         regidx = (bit & 0x60) >> 5;
7413                         bit &= 0x1f;
7414                         mc_filter[regidx] |= (1 << bit);
7415                 }
7416
7417                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7418                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7419                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7420                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7421         }
7422
7423         if (rx_mode != tp->rx_mode) {
7424                 tp->rx_mode = rx_mode;
7425                 tw32_f(MAC_RX_MODE, rx_mode);
7426                 udelay(10);
7427         }
7428 }
7429
7430 static void tg3_set_rx_mode(struct net_device *dev)
7431 {
7432         struct tg3 *tp = netdev_priv(dev);
7433
7434         if (!netif_running(dev))
7435                 return;
7436
7437         tg3_full_lock(tp, 0);
7438         __tg3_set_rx_mode(dev);
7439         tg3_full_unlock(tp);
7440 }
7441
7442 #define TG3_REGDUMP_LEN         (32 * 1024)
7443
7444 static int tg3_get_regs_len(struct net_device *dev)
7445 {
7446         return TG3_REGDUMP_LEN;
7447 }
7448
7449 static void tg3_get_regs(struct net_device *dev,
7450                 struct ethtool_regs *regs, void *_p)
7451 {
7452         u32 *p = _p;
7453         struct tg3 *tp = netdev_priv(dev);
7454         u8 *orig_p = _p;
7455         int i;
7456
7457         regs->version = 0;
7458
7459         memset(p, 0, TG3_REGDUMP_LEN);
7460
7461         if (tp->link_config.phy_is_low_power)
7462                 return;
7463
7464         tg3_full_lock(tp, 0);
7465
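        /* Each register value is stored at its own offset within the dump
         * buffer, so gaps between the ranges read below stay zero from the
         * memset above.
         */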
7466 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7467 #define GET_REG32_LOOP(base,len)                \
7468 do {    p = (u32 *)(orig_p + (base));           \
7469         for (i = 0; i < len; i += 4)            \
7470                 __GET_REG32((base) + i);        \
7471 } while (0)
7472 #define GET_REG32_1(reg)                        \
7473 do {    p = (u32 *)(orig_p + (reg));            \
7474         __GET_REG32((reg));                     \
7475 } while (0)
7476
7477         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7478         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7479         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7480         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7481         GET_REG32_1(SNDDATAC_MODE);
7482         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7483         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7484         GET_REG32_1(SNDBDC_MODE);
7485         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7486         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7487         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7488         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7489         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7490         GET_REG32_1(RCVDCC_MODE);
7491         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7492         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7493         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7494         GET_REG32_1(MBFREE_MODE);
7495         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7496         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7497         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7498         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7499         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7500         GET_REG32_1(RX_CPU_MODE);
7501         GET_REG32_1(RX_CPU_STATE);
7502         GET_REG32_1(RX_CPU_PGMCTR);
7503         GET_REG32_1(RX_CPU_HWBKPT);
7504         GET_REG32_1(TX_CPU_MODE);
7505         GET_REG32_1(TX_CPU_STATE);
7506         GET_REG32_1(TX_CPU_PGMCTR);
7507         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7508         GET_REG32_LOOP(FTQ_RESET, 0x120);
7509         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7510         GET_REG32_1(DMAC_MODE);
7511         GET_REG32_LOOP(GRC_MODE, 0x4c);
7512         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7513                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7514
7515 #undef __GET_REG32
7516 #undef GET_REG32_LOOP
7517 #undef GET_REG32_1
7518
7519         tg3_full_unlock(tp);
7520 }
7521
7522 static int tg3_get_eeprom_len(struct net_device *dev)
7523 {
7524         struct tg3 *tp = netdev_priv(dev);
7525
7526         return tp->nvram_size;
7527 }
7528
7529 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7530 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7531
7532 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7533 {
7534         struct tg3 *tp = netdev_priv(dev);
7535         int ret;
7536         u8  *pd;
7537         u32 i, offset, len, val, b_offset, b_count;
7538
7539         if (tp->link_config.phy_is_low_power)
7540                 return -EAGAIN;
7541
7542         offset = eeprom->offset;
7543         len = eeprom->len;
7544         eeprom->len = 0;
7545
7546         eeprom->magic = TG3_EEPROM_MAGIC;
7547
7548         if (offset & 3) {
7549                 /* adjustments to start on required 4 byte boundary */
7550                 b_offset = offset & 3;
7551                 b_count = 4 - b_offset;
7552                 if (b_count > len) {
7553                         /* i.e. offset=1 len=2 */
7554                         b_count = len;
7555                 }
7556                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7557                 if (ret)
7558                         return ret;
7559                 val = cpu_to_le32(val);
7560                 memcpy(data, ((char*)&val) + b_offset, b_count);
7561                 len -= b_count;
7562                 offset += b_count;
7563                 eeprom->len += b_count;
7564         }
7565
7566         /* read bytes up to the last 4-byte boundary */
7567         pd = &data[eeprom->len];
7568         for (i = 0; i < (len - (len & 3)); i += 4) {
7569                 ret = tg3_nvram_read(tp, offset + i, &val);
7570                 if (ret) {
7571                         eeprom->len += i;
7572                         return ret;
7573                 }
7574                 val = cpu_to_le32(val);
7575                 memcpy(pd + i, &val, 4);
7576         }
7577         eeprom->len += i;
7578
7579         if (len & 3) {
7580                 /* read last bytes not ending on a 4-byte boundary */
7581                 pd = &data[eeprom->len];
7582                 b_count = len & 3;
7583                 b_offset = offset + len - b_count;
7584                 ret = tg3_nvram_read(tp, b_offset, &val);
7585                 if (ret)
7586                         return ret;
7587                 val = cpu_to_le32(val);
7588                 memcpy(pd, ((char*)&val), b_count);
7589                 eeprom->len += b_count;
7590         }
7591         return 0;
7592 }
7593
7594 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7595
7596 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7597 {
7598         struct tg3 *tp = netdev_priv(dev);
7599         int ret;
7600         u32 offset, len, b_offset, odd_len, start, end;
7601         u8 *buf;
7602
7603         if (tp->link_config.phy_is_low_power)
7604                 return -EAGAIN;
7605
7606         if (eeprom->magic != TG3_EEPROM_MAGIC)
7607                 return -EINVAL;
7608
7609         offset = eeprom->offset;
7610         len = eeprom->len;
7611
7612         if ((b_offset = (offset & 3))) {
7613                 /* adjustments to start on required 4 byte boundary */
7614                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7615                 if (ret)
7616                         return ret;
7617                 start = cpu_to_le32(start);
7618                 len += b_offset;
7619                 offset &= ~3;
7620                 if (len < 4)
7621                         len = 4;
7622         }
7623
7624         odd_len = 0;
7625         if (len & 3) {
7626                 /* adjustments to end on required 4 byte boundary */
7627                 odd_len = 1;
7628                 len = (len + 3) & ~3;
7629                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7630                 if (ret)
7631                         return ret;
7632                 end = cpu_to_le32(end);
7633         }
7634
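        /* If either end of the write had to be rounded out to a 4-byte
         * boundary, build a bounce buffer that merges the preserved NVRAM
         * words with the caller's data before writing the whole block.
         */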
7635         buf = data;
7636         if (b_offset || odd_len) {
7637                 buf = kmalloc(len, GFP_KERNEL);
7638                 if (!buf)
7639                         return -ENOMEM;
7640                 if (b_offset)
7641                         memcpy(buf, &start, 4);
7642                 if (odd_len)
7643                         memcpy(buf+len-4, &end, 4);
7644                 memcpy(buf + b_offset, data, eeprom->len);
7645         }
7646
7647         ret = tg3_nvram_write_block(tp, offset, len, buf);
7648
7649         if (buf != data)
7650                 kfree(buf);
7651
7652         return ret;
7653 }
7654
7655 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7656 {
7657         struct tg3 *tp = netdev_priv(dev);
7658   
7659         cmd->supported = (SUPPORTED_Autoneg);
7660
7661         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7662                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7663                                    SUPPORTED_1000baseT_Full);
7664
7665         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7666                 cmd->supported |= (SUPPORTED_100baseT_Half |
7667                                   SUPPORTED_100baseT_Full |
7668                                   SUPPORTED_10baseT_Half |
7669                                   SUPPORTED_10baseT_Full |
7670                                   SUPPORTED_MII);
7671                 cmd->port = PORT_TP;
7672         } else {
7673                 cmd->supported |= SUPPORTED_FIBRE;
7674                 cmd->port = PORT_FIBRE;
7675         }
7676   
7677         cmd->advertising = tp->link_config.advertising;
7678         if (netif_running(dev)) {
7679                 cmd->speed = tp->link_config.active_speed;
7680                 cmd->duplex = tp->link_config.active_duplex;
7681         }
7682         cmd->phy_address = PHY_ADDR;
7683         cmd->transceiver = 0;
7684         cmd->autoneg = tp->link_config.autoneg;
7685         cmd->maxtxpkt = 0;
7686         cmd->maxrxpkt = 0;
7687         return 0;
7688 }
7689   
7690 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7691 {
7692         struct tg3 *tp = netdev_priv(dev);
7693   
7694         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7695                 /* These are the only valid advertisement bits allowed.  */
7696                 if (cmd->autoneg == AUTONEG_ENABLE &&
7697                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7698                                           ADVERTISED_1000baseT_Full |
7699                                           ADVERTISED_Autoneg |
7700                                           ADVERTISED_FIBRE)))
7701                         return -EINVAL;
7702                 /* Fiber can only do SPEED_1000.  */
7703                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7704                          (cmd->speed != SPEED_1000))
7705                         return -EINVAL;
7706         /* Copper cannot force SPEED_1000.  */
7707         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7708                    (cmd->speed == SPEED_1000))
7709                 return -EINVAL;
7710         else if ((cmd->speed == SPEED_1000) &&
7711                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7712                 return -EINVAL;
7713
7714         tg3_full_lock(tp, 0);
7715
7716         tp->link_config.autoneg = cmd->autoneg;
7717         if (cmd->autoneg == AUTONEG_ENABLE) {
7718                 tp->link_config.advertising = cmd->advertising;
7719                 tp->link_config.speed = SPEED_INVALID;
7720                 tp->link_config.duplex = DUPLEX_INVALID;
7721         } else {
7722                 tp->link_config.advertising = 0;
7723                 tp->link_config.speed = cmd->speed;
7724                 tp->link_config.duplex = cmd->duplex;
7725         }
7726   
7727         if (netif_running(dev))
7728                 tg3_setup_phy(tp, 1);
7729
7730         tg3_full_unlock(tp);
7731   
7732         return 0;
7733 }
7734   
7735 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7736 {
7737         struct tg3 *tp = netdev_priv(dev);
7738   
7739         strcpy(info->driver, DRV_MODULE_NAME);
7740         strcpy(info->version, DRV_MODULE_VERSION);
7741         strcpy(info->fw_version, tp->fw_ver);
7742         strcpy(info->bus_info, pci_name(tp->pdev));
7743 }
7744   
7745 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7746 {
7747         struct tg3 *tp = netdev_priv(dev);
7748   
7749         wol->supported = WAKE_MAGIC;
7750         wol->wolopts = 0;
7751         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7752                 wol->wolopts = WAKE_MAGIC;
7753         memset(&wol->sopass, 0, sizeof(wol->sopass));
7754 }
7755   
7756 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7757 {
7758         struct tg3 *tp = netdev_priv(dev);
7759   
7760         if (wol->wolopts & ~WAKE_MAGIC)
7761                 return -EINVAL;
7762         if ((wol->wolopts & WAKE_MAGIC) &&
7763             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7764             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7765                 return -EINVAL;
7766   
7767         spin_lock_bh(&tp->lock);
7768         if (wol->wolopts & WAKE_MAGIC)
7769                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7770         else
7771                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7772         spin_unlock_bh(&tp->lock);
7773   
7774         return 0;
7775 }
7776   
7777 static u32 tg3_get_msglevel(struct net_device *dev)
7778 {
7779         struct tg3 *tp = netdev_priv(dev);
7780         return tp->msg_enable;
7781 }
7782   
7783 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7784 {
7785         struct tg3 *tp = netdev_priv(dev);
7786         tp->msg_enable = value;
7787 }
7788   
7789 #if TG3_TSO_SUPPORT != 0
7790 static int tg3_set_tso(struct net_device *dev, u32 value)
7791 {
7792         struct tg3 *tp = netdev_priv(dev);
7793
7794         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7795                 if (value)
7796                         return -EINVAL;
7797                 return 0;
7798         }
7799         return ethtool_op_set_tso(dev, value);
7800 }
7801 #endif
7802   
7803 static int tg3_nway_reset(struct net_device *dev)
7804 {
7805         struct tg3 *tp = netdev_priv(dev);
7806         u32 bmcr;
7807         int r;
7808   
7809         if (!netif_running(dev))
7810                 return -EAGAIN;
7811
7812         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7813                 return -EINVAL;
7814
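             /* Autoneg can only be restarted if the PHY currently has it
              * enabled (or parallel detection is in use); set BMCR_ANRESTART
              * together with BMCR_ANENABLE to kick it off, otherwise return
              * -EINVAL.
              */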
7815         spin_lock_bh(&tp->lock);
7816         r = -EINVAL;
7817         tg3_readphy(tp, MII_BMCR, &bmcr);
7818         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7819             ((bmcr & BMCR_ANENABLE) ||
7820              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7821                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7822                                            BMCR_ANENABLE);
7823                 r = 0;
7824         }
7825         spin_unlock_bh(&tp->lock);
7826   
7827         return r;
7828 }
7829   
7830 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7831 {
7832         struct tg3 *tp = netdev_priv(dev);
7833   
7834         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7835         ering->rx_mini_max_pending = 0;
7836         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7837                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7838         else
7839                 ering->rx_jumbo_max_pending = 0;
7840
7841         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7842
7843         ering->rx_pending = tp->rx_pending;
7844         ering->rx_mini_pending = 0;
7845         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7846                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7847         else
7848                 ering->rx_jumbo_pending = 0;
7849
7850         ering->tx_pending = tp->tx_pending;
7851 }
7852   
7853 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7854 {
7855         struct tg3 *tp = netdev_priv(dev);
7856         int irq_sync = 0;
7857   
7858         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7859             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7860             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7861                 return -EINVAL;
7862   
7863         if (netif_running(dev)) {
7864                 tg3_netif_stop(tp);
7865                 irq_sync = 1;
7866         }
7867
7868         tg3_full_lock(tp, irq_sync);
7869   
7870         tp->rx_pending = ering->rx_pending;
7871
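             /* Chips flagged TG3_FLG2_MAX_RXPEND_64 cannot post more than 64
              * standard RX descriptors, so clamp the requested value.
              */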
7872         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7873             tp->rx_pending > 63)
7874                 tp->rx_pending = 63;
7875         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7876         tp->tx_pending = ering->tx_pending;
7877
7878         if (netif_running(dev)) {
7879                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7880                 tg3_init_hw(tp, 1);
7881                 tg3_netif_start(tp);
7882         }
7883
7884         tg3_full_unlock(tp);
7885   
7886         return 0;
7887 }
7888   
7889 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7890 {
7891         struct tg3 *tp = netdev_priv(dev);
7892   
7893         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7894         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7895         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7896 }
7897   
7898 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7899 {
7900         struct tg3 *tp = netdev_priv(dev);
7901         int irq_sync = 0;
7902   
7903         if (netif_running(dev)) {
7904                 tg3_netif_stop(tp);
7905                 irq_sync = 1;
7906         }
7907
7908         tg3_full_lock(tp, irq_sync);
7909
7910         if (epause->autoneg)
7911                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7912         else
7913                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7914         if (epause->rx_pause)
7915                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7916         else
7917                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7918         if (epause->tx_pause)
7919                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7920         else
7921                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7922
7923         if (netif_running(dev)) {
7924                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7925                 tg3_init_hw(tp, 1);
7926                 tg3_netif_start(tp);
7927         }
7928
7929         tg3_full_unlock(tp);
7930   
7931         return 0;
7932 }
7933   
7934 static u32 tg3_get_rx_csum(struct net_device *dev)
7935 {
7936         struct tg3 *tp = netdev_priv(dev);
7937         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7938 }
7939   
7940 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7941 {
7942         struct tg3 *tp = netdev_priv(dev);
7943   
7944         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7945                 if (data != 0)
7946                         return -EINVAL;
7947                 return 0;
7948         }
7949   
7950         spin_lock_bh(&tp->lock);
7951         if (data)
7952                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7953         else
7954                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7955         spin_unlock_bh(&tp->lock);
7956   
7957         return 0;
7958 }
7959   
7960 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7961 {
7962         struct tg3 *tp = netdev_priv(dev);
7963   
7964         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7965                 if (data != 0)
7966                         return -EINVAL;
7967                 return 0;
7968         }
7969   
7970         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7971             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7972                 ethtool_op_set_tx_hw_csum(dev, data);
7973         else
7974                 ethtool_op_set_tx_csum(dev, data);
7975
7976         return 0;
7977 }
7978
7979 static int tg3_get_stats_count(struct net_device *dev)
7980 {
7981         return TG3_NUM_STATS;
7982 }
7983
7984 static int tg3_get_test_count(struct net_device *dev)
7985 {
7986         return TG3_NUM_TEST;
7987 }
7988
7989 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7990 {
7991         switch (stringset) {
7992         case ETH_SS_STATS:
7993                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7994                 break;
7995         case ETH_SS_TEST:
7996                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7997                 break;
7998         default:
7999                 WARN_ON(1);     /* we need a WARN() */
8000                 break;
8001         }
8002 }
8003
8004 static int tg3_phys_id(struct net_device *dev, u32 data)
8005 {
8006         struct tg3 *tp = netdev_priv(dev);
8007         int i;
8008
8009         if (!netif_running(tp->dev))
8010                 return -EAGAIN;
8011
8012         if (data == 0)
8013                 data = 2;
8014
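             /* Blink the port LEDs for identification: override MAC_LED_CTRL
              * and toggle all LEDs every 500ms for roughly 'data' seconds
              * (default 2), then restore the saved LED configuration.
              */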
8015         for (i = 0; i < (data * 2); i++) {
8016                 if ((i % 2) == 0)
8017                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8018                                            LED_CTRL_1000MBPS_ON |
8019                                            LED_CTRL_100MBPS_ON |
8020                                            LED_CTRL_10MBPS_ON |
8021                                            LED_CTRL_TRAFFIC_OVERRIDE |
8022                                            LED_CTRL_TRAFFIC_BLINK |
8023                                            LED_CTRL_TRAFFIC_LED);
8024         
8025                 else
8026                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8027                                            LED_CTRL_TRAFFIC_OVERRIDE);
8028
8029                 if (msleep_interruptible(500))
8030                         break;
8031         }
8032         tw32(MAC_LED_CTRL, tp->led_ctrl);
8033         return 0;
8034 }
8035
8036 static void tg3_get_ethtool_stats(struct net_device *dev,
8037                                    struct ethtool_stats *estats, u64 *tmp_stats)
8038 {
8039         struct tg3 *tp = netdev_priv(dev);
8040         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8041 }
8042
8043 #define NVRAM_TEST_SIZE 0x100
8044 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8045
8046 static int tg3_test_nvram(struct tg3 *tp)
8047 {
8048         u32 *buf, csum, magic;
8049         int i, j, err = 0, size;
8050
8051         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8052                 return -EIO;
8053
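             /* Legacy images start with TG3_EEPROM_MAGIC.  Selfboot images
              * carry 0xa5 in the top byte; only format 1
              * (NVRAM_SELFBOOT_FORMAT1_SIZE bytes) is verified here, other
              * selfboot layouts are skipped and reported as passing.
              */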
8054         if (magic == TG3_EEPROM_MAGIC)
8055                 size = NVRAM_TEST_SIZE;
8056         else if ((magic & 0xff000000) == 0xa5000000) {
8057                 if ((magic & 0xe00000) == 0x200000)
8058                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8059                 else
8060                         return 0;
8061         } else
8062                 return -EIO;
8063
8064         buf = kmalloc(size, GFP_KERNEL);
8065         if (buf == NULL)
8066                 return -ENOMEM;
8067
8068         err = -EIO;
8069         for (i = 0, j = 0; i < size; i += 4, j++) {
8070                 u32 val;
8071
8072                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8073                         break;
8074                 buf[j] = cpu_to_le32(val);
8075         }
8076         if (i < size)
8077                 goto out;
8078
8079         /* Selfboot format */
8080         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8081                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8082
8083                 for (i = 0; i < size; i++)
8084                         csum8 += buf8[i];
8085
8086                 if (csum8 == 0) {
8087                         err = 0;
8088                         goto out;
8089                 }
8090
8091                 err = -EIO;
8092                 goto out;
8093         }
8094
8095         /* Bootstrap checksum at offset 0x10 */
             err = -EIO;    /* any checksum mismatch below is a failure */
8096         csum = calc_crc((unsigned char *) buf, 0x10);
8097         if (csum != cpu_to_le32(buf[0x10/4]))
8098                 goto out;
8099
8100         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8101         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8102         if (csum != cpu_to_le32(buf[0xfc/4]))
8103                 goto out;
8104
8105         err = 0;
8106
8107 out:
8108         kfree(buf);
8109         return err;
8110 }
8111
8112 #define TG3_SERDES_TIMEOUT_SEC  2
8113 #define TG3_COPPER_TIMEOUT_SEC  6
8114
8115 static int tg3_test_link(struct tg3 *tp)
8116 {
8117         int i, max;
8118
8119         if (!netif_running(tp->dev))
8120                 return -ENODEV;
8121
8122         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8123                 max = TG3_SERDES_TIMEOUT_SEC;
8124         else
8125                 max = TG3_COPPER_TIMEOUT_SEC;
8126
8127         for (i = 0; i < max; i++) {
8128                 if (netif_carrier_ok(tp->dev))
8129                         return 0;
8130
8131                 if (msleep_interruptible(1000))
8132                         break;
8133         }
8134
8135         return -EIO;
8136 }
8137
8138 /* Only test the commonly used registers */
8139 static int tg3_test_registers(struct tg3 *tp)
8140 {
8141         int i, is_5705;
8142         u32 offset, read_mask, write_mask, val, save_val, read_val;
8143         static struct {
8144                 u16 offset;
8145                 u16 flags;
8146 #define TG3_FL_5705     0x1
8147 #define TG3_FL_NOT_5705 0x2
8148 #define TG3_FL_NOT_5788 0x4
8149                 u32 read_mask;
8150                 u32 write_mask;
8151         } reg_tbl[] = {
8152                 /* MAC Control Registers */
8153                 { MAC_MODE, TG3_FL_NOT_5705,
8154                         0x00000000, 0x00ef6f8c },
8155                 { MAC_MODE, TG3_FL_5705,
8156                         0x00000000, 0x01ef6b8c },
8157                 { MAC_STATUS, TG3_FL_NOT_5705,
8158                         0x03800107, 0x00000000 },
8159                 { MAC_STATUS, TG3_FL_5705,
8160                         0x03800100, 0x00000000 },
8161                 { MAC_ADDR_0_HIGH, 0x0000,
8162                         0x00000000, 0x0000ffff },
8163                 { MAC_ADDR_0_LOW, 0x0000,
8164                         0x00000000, 0xffffffff },
8165                 { MAC_RX_MTU_SIZE, 0x0000,
8166                         0x00000000, 0x0000ffff },
8167                 { MAC_TX_MODE, 0x0000,
8168                         0x00000000, 0x00000070 },
8169                 { MAC_TX_LENGTHS, 0x0000,
8170                         0x00000000, 0x00003fff },
8171                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8172                         0x00000000, 0x000007fc },
8173                 { MAC_RX_MODE, TG3_FL_5705,
8174                         0x00000000, 0x000007dc },
8175                 { MAC_HASH_REG_0, 0x0000,
8176                         0x00000000, 0xffffffff },
8177                 { MAC_HASH_REG_1, 0x0000,
8178                         0x00000000, 0xffffffff },
8179                 { MAC_HASH_REG_2, 0x0000,
8180                         0x00000000, 0xffffffff },
8181                 { MAC_HASH_REG_3, 0x0000,
8182                         0x00000000, 0xffffffff },
8183
8184                 /* Receive Data and Receive BD Initiator Control Registers. */
8185                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8186                         0x00000000, 0xffffffff },
8187                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8188                         0x00000000, 0xffffffff },
8189                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8190                         0x00000000, 0x00000003 },
8191                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8192                         0x00000000, 0xffffffff },
8193                 { RCVDBDI_STD_BD+0, 0x0000,
8194                         0x00000000, 0xffffffff },
8195                 { RCVDBDI_STD_BD+4, 0x0000,
8196                         0x00000000, 0xffffffff },
8197                 { RCVDBDI_STD_BD+8, 0x0000,
8198                         0x00000000, 0xffff0002 },
8199                 { RCVDBDI_STD_BD+0xc, 0x0000,
8200                         0x00000000, 0xffffffff },
8201         
8202                 /* Receive BD Initiator Control Registers. */
8203                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8204                         0x00000000, 0xffffffff },
8205                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8206                         0x00000000, 0x000003ff },
8207                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8208                         0x00000000, 0xffffffff },
8209         
8210                 /* Host Coalescing Control Registers. */
8211                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8212                         0x00000000, 0x00000004 },
8213                 { HOSTCC_MODE, TG3_FL_5705,
8214                         0x00000000, 0x000000f6 },
8215                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8216                         0x00000000, 0xffffffff },
8217                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8218                         0x00000000, 0x000003ff },
8219                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8220                         0x00000000, 0xffffffff },
8221                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8222                         0x00000000, 0x000003ff },
8223                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8224                         0x00000000, 0xffffffff },
8225                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8226                         0x00000000, 0x000000ff },
8227                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8228                         0x00000000, 0xffffffff },
8229                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8230                         0x00000000, 0x000000ff },
8231                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8232                         0x00000000, 0xffffffff },
8233                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8234                         0x00000000, 0xffffffff },
8235                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8236                         0x00000000, 0xffffffff },
8237                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8238                         0x00000000, 0x000000ff },
8239                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8240                         0x00000000, 0xffffffff },
8241                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8242                         0x00000000, 0x000000ff },
8243                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8244                         0x00000000, 0xffffffff },
8245                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8246                         0x00000000, 0xffffffff },
8247                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8248                         0x00000000, 0xffffffff },
8249                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8250                         0x00000000, 0xffffffff },
8251                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8252                         0x00000000, 0xffffffff },
8253                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8254                         0xffffffff, 0x00000000 },
8255                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8256                         0xffffffff, 0x00000000 },
8257
8258                 /* Buffer Manager Control Registers. */
8259                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8260                         0x00000000, 0x007fff80 },
8261                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8262                         0x00000000, 0x007fffff },
8263                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8264                         0x00000000, 0x0000003f },
8265                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8266                         0x00000000, 0x000001ff },
8267                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8268                         0x00000000, 0x000001ff },
8269                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8270                         0xffffffff, 0x00000000 },
8271                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8272                         0xffffffff, 0x00000000 },
8273         
8274                 /* Mailbox Registers */
8275                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8276                         0x00000000, 0x000001ff },
8277                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8278                         0x00000000, 0x000001ff },
8279                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8280                         0x00000000, 0x000007ff },
8281                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8282                         0x00000000, 0x000001ff },
8283
8284                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8285         };
8286
8287         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8288                 is_5705 = 1;
8289         else
8290                 is_5705 = 0;
8291
8292         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8293                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8294                         continue;
8295
8296                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8297                         continue;
8298
8299                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8300                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8301                         continue;
8302
8303                 offset = (u32) reg_tbl[i].offset;
8304                 read_mask = reg_tbl[i].read_mask;
8305                 write_mask = reg_tbl[i].write_mask;
8306
8307                 /* Save the original register content */
8308                 save_val = tr32(offset);
8309
8310                 /* Determine the read-only value. */
8311                 read_val = save_val & read_mask;
8312
8313                 /* Write zero to the register, then make sure the read-only bits
8314                  * are not changed and the read/write bits are all zeros.
8315                  */
8316                 tw32(offset, 0);
8317
8318                 val = tr32(offset);
8319
8320                 /* Test the read-only and read/write bits. */
8321                 if (((val & read_mask) != read_val) || (val & write_mask))
8322                         goto out;
8323
8324                 /* Write ones to all the bits defined by RdMask and WrMask, then
8325                  * make sure the read-only bits are not changed and the
8326                  * read/write bits are all ones.
8327                  */
8328                 tw32(offset, read_mask | write_mask);
8329
8330                 val = tr32(offset);
8331
8332                 /* Test the read-only bits. */
8333                 if ((val & read_mask) != read_val)
8334                         goto out;
8335
8336                 /* Test the read/write bits. */
8337                 if ((val & write_mask) != write_mask)
8338                         goto out;
8339
8340                 tw32(offset, save_val);
8341         }
8342
8343         return 0;
8344
8345 out:
8346         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8347         tw32(offset, save_val);
8348         return -EIO;
8349 }
8350
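/* Write each test pattern across the given on-chip memory range via
 * tg3_write_mem(), read every word back with tg3_read_mem(), and fail on
 * the first mismatch.
 */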
8351 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8352 {
8353         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8354         int i;
8355         u32 j;
8356
8357         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8358                 for (j = 0; j < len; j += 4) {
8359                         u32 val;
8360
8361                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8362                         tg3_read_mem(tp, offset + j, &val);
8363                         if (val != test_pattern[i])
8364                                 return -EIO;
8365                 }
8366         }
8367         return 0;
8368 }
8369
8370 static int tg3_test_memory(struct tg3 *tp)
8371 {
8372         static struct mem_entry {
8373                 u32 offset;
8374                 u32 len;
8375         } mem_tbl_570x[] = {
8376                 { 0x00000000, 0x00b50},
8377                 { 0x00002000, 0x1c000},
8378                 { 0xffffffff, 0x00000}
8379         }, mem_tbl_5705[] = {
8380                 { 0x00000100, 0x0000c},
8381                 { 0x00000200, 0x00008},
8382                 { 0x00004000, 0x00800},
8383                 { 0x00006000, 0x01000},
8384                 { 0x00008000, 0x02000},
8385                 { 0x00010000, 0x0e000},
8386                 { 0xffffffff, 0x00000}
8387         }, mem_tbl_5755[] = {
8388                 { 0x00000200, 0x00008},
8389                 { 0x00004000, 0x00800},
8390                 { 0x00006000, 0x00800},
8391                 { 0x00008000, 0x02000},
8392                 { 0x00010000, 0x0c000},
8393                 { 0xffffffff, 0x00000}
8394         };
8395         struct mem_entry *mem_tbl;
8396         int err = 0;
8397         int i;
8398
8399         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8400                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8401                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8402                         mem_tbl = mem_tbl_5755;
8403                 else
8404                         mem_tbl = mem_tbl_5705;
8405         } else
8406                 mem_tbl = mem_tbl_570x;
8407
8408         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8409                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8410                     mem_tbl[i].len)) != 0)
8411                         break;
8412         }
8413         
8414         return err;
8415 }
8416
8417 #define TG3_MAC_LOOPBACK        0
8418 #define TG3_PHY_LOOPBACK        1
8419
8420 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8421 {
8422         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8423         u32 desc_idx;
8424         struct sk_buff *skb, *rx_skb;
8425         u8 *tx_data;
8426         dma_addr_t map;
8427         int num_pkts, tx_len, rx_len, i, err;
8428         struct tg3_rx_buffer_desc *desc;
8429
8430         if (loopback_mode == TG3_MAC_LOOPBACK) {
8431                 /* HW errata - mac loopback fails in some cases on 5780.
8432                  * Normal traffic and PHY loopback are not affected by
8433                  * errata.
8434                  */
8435                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8436                         return 0;
8437
8438                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8439                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8440                            MAC_MODE_PORT_MODE_GMII;
8441                 tw32(MAC_MODE, mac_mode);
8442         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8443                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8444                                            BMCR_SPEED1000);
8445                 udelay(40);
8446                 /* reset to prevent losing 1st rx packet intermittently */
8447                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8448                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8449                         udelay(10);
8450                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8451                 }
8452                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8453                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8454                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8455                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8456                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8457                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8458                 }
8459                 tw32(MAC_MODE, mac_mode);
8460         }
8461         else
8462                 return -EINVAL;
8463
8464         err = -EIO;
8465
8466         tx_len = 1514;
8467         skb = dev_alloc_skb(tx_len);
8468         if (!skb)
8469                 return -ENOMEM;
8470
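             /* Build the loopback test frame: destination MAC is our own
              * address, followed by 8 bytes of zero and an incrementing byte
              * pattern that the receive side is verified against below.
              */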
8471         tx_data = skb_put(skb, tx_len);
8472         memcpy(tx_data, tp->dev->dev_addr, 6);
8473         memset(tx_data + 6, 0x0, 8);
8474
8475         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8476
8477         for (i = 14; i < tx_len; i++)
8478                 tx_data[i] = (u8) (i & 0xff);
8479
8480         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8481
8482         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8483              HOSTCC_MODE_NOW);
8484
8485         udelay(10);
8486
8487         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8488
8489         num_pkts = 0;
8490
8491         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8492
8493         tp->tx_prod++;
8494         num_pkts++;
8495
8496         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8497                      tp->tx_prod);
8498         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8499
8500         udelay(10);
8501
8502         for (i = 0; i < 10; i++) {
8503                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8504                        HOSTCC_MODE_NOW);
8505
8506                 udelay(10);
8507
8508                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8509                 rx_idx = tp->hw_status->idx[0].rx_producer;
8510                 if ((tx_idx == tp->tx_prod) &&
8511                     (rx_idx == (rx_start_idx + num_pkts)))
8512                         break;
8513         }
8514
8515         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8516         dev_kfree_skb(skb);
8517
8518         if (tx_idx != tp->tx_prod)
8519                 goto out;
8520
8521         if (rx_idx != rx_start_idx + num_pkts)
8522                 goto out;
8523
8524         desc = &tp->rx_rcb[rx_start_idx];
8525         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8526         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8527         if (opaque_key != RXD_OPAQUE_RING_STD)
8528                 goto out;
8529
8530         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8531             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8532                 goto out;
8533
8534         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8535         if (rx_len != tx_len)
8536                 goto out;
8537
8538         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8539
8540         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8541         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8542
8543         for (i = 14; i < tx_len; i++) {
8544                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8545                         goto out;
8546         }
8547         err = 0;
8548         
8549         /* tg3_free_rings will unmap and free the rx_skb */
8550 out:
8551         return err;
8552 }
8553
8554 #define TG3_MAC_LOOPBACK_FAILED         1
8555 #define TG3_PHY_LOOPBACK_FAILED         2
8556 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8557                                          TG3_PHY_LOOPBACK_FAILED)
8558
8559 static int tg3_test_loopback(struct tg3 *tp)
8560 {
8561         int err = 0;
8562
8563         if (!netif_running(tp->dev))
8564                 return TG3_LOOPBACK_FAILED;
8565
8566         tg3_reset_hw(tp, 1);
8567
8568         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8569                 err |= TG3_MAC_LOOPBACK_FAILED;
8570         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8571                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8572                         err |= TG3_PHY_LOOPBACK_FAILED;
8573         }
8574
8575         return err;
8576 }
8577
8578 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8579                           u64 *data)
8580 {
8581         struct tg3 *tp = netdev_priv(dev);
8582
8583         if (tp->link_config.phy_is_low_power)
8584                 tg3_set_power_state(tp, PCI_D0);
8585
8586         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8587
8588         if (tg3_test_nvram(tp) != 0) {
8589                 etest->flags |= ETH_TEST_FL_FAILED;
8590                 data[0] = 1;
8591         }
8592         if (tg3_test_link(tp) != 0) {
8593                 etest->flags |= ETH_TEST_FL_FAILED;
8594                 data[1] = 1;
8595         }
8596         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8597                 int err, irq_sync = 0;
8598
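                     /* Offline tests: quiesce the interface, halt the chip
                      * and its on-chip CPUs, run the register/memory/loopback
                      * tests, then the interrupt test, and finally reset and
                      * reinitialize the hardware.
                      */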
8599                 if (netif_running(dev)) {
8600                         tg3_netif_stop(tp);
8601                         irq_sync = 1;
8602                 }
8603
8604                 tg3_full_lock(tp, irq_sync);
8605
8606                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8607                 err = tg3_nvram_lock(tp);
8608                 tg3_halt_cpu(tp, RX_CPU_BASE);
8609                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8610                         tg3_halt_cpu(tp, TX_CPU_BASE);
8611                 if (!err)
8612                         tg3_nvram_unlock(tp);
8613
8614                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8615                         tg3_phy_reset(tp);
8616
8617                 if (tg3_test_registers(tp) != 0) {
8618                         etest->flags |= ETH_TEST_FL_FAILED;
8619                         data[2] = 1;
8620                 }
8621                 if (tg3_test_memory(tp) != 0) {
8622                         etest->flags |= ETH_TEST_FL_FAILED;
8623                         data[3] = 1;
8624                 }
8625                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8626                         etest->flags |= ETH_TEST_FL_FAILED;
8627
8628                 tg3_full_unlock(tp);
8629
8630                 if (tg3_test_interrupt(tp) != 0) {
8631                         etest->flags |= ETH_TEST_FL_FAILED;
8632                         data[5] = 1;
8633                 }
8634
8635                 tg3_full_lock(tp, 0);
8636
8637                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8638                 if (netif_running(dev)) {
8639                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8640                         tg3_init_hw(tp, 1);
8641                         tg3_netif_start(tp);
8642                 }
8643
8644                 tg3_full_unlock(tp);
8645         }
8646         if (tp->link_config.phy_is_low_power)
8647                 tg3_set_power_state(tp, PCI_D3hot);
8648
8649 }
8650
8651 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8652 {
8653         struct mii_ioctl_data *data = if_mii(ifr);
8654         struct tg3 *tp = netdev_priv(dev);
8655         int err;
8656
8657         switch(cmd) {
8658         case SIOCGMIIPHY:
8659                 data->phy_id = PHY_ADDR;
8660
8661                 /* fallthru */
8662         case SIOCGMIIREG: {
8663                 u32 mii_regval;
8664
8665                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8666                         break;                  /* We have no PHY */
8667
8668                 if (tp->link_config.phy_is_low_power)
8669                         return -EAGAIN;
8670
8671                 spin_lock_bh(&tp->lock);
8672                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8673                 spin_unlock_bh(&tp->lock);
8674
8675                 data->val_out = mii_regval;
8676
8677                 return err;
8678         }
8679
8680         case SIOCSMIIREG:
8681                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8682                         break;                  /* We have no PHY */
8683
8684                 if (!capable(CAP_NET_ADMIN))
8685                         return -EPERM;
8686
8687                 if (tp->link_config.phy_is_low_power)
8688                         return -EAGAIN;
8689
8690                 spin_lock_bh(&tp->lock);
8691                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8692                 spin_unlock_bh(&tp->lock);
8693
8694                 return err;
8695
8696         default:
8697                 /* do nothing */
8698                 break;
8699         }
8700         return -EOPNOTSUPP;
8701 }
8702
8703 #if TG3_VLAN_TAG_USED
8704 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8705 {
8706         struct tg3 *tp = netdev_priv(dev);
8707
8708         tg3_full_lock(tp, 0);
8709
8710         tp->vlgrp = grp;
8711
8712         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8713         __tg3_set_rx_mode(dev);
8714
8715         tg3_full_unlock(tp);
8716 }
8717
8718 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8719 {
8720         struct tg3 *tp = netdev_priv(dev);
8721
8722         tg3_full_lock(tp, 0);
8723         if (tp->vlgrp)
8724                 tp->vlgrp->vlan_devices[vid] = NULL;
8725         tg3_full_unlock(tp);
8726 }
8727 #endif
8728
8729 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8730 {
8731         struct tg3 *tp = netdev_priv(dev);
8732
8733         memcpy(ec, &tp->coal, sizeof(*ec));
8734         return 0;
8735 }
8736
8737 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8738 {
8739         struct tg3 *tp = netdev_priv(dev);
8740         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8741         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8742
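             /* The per-IRQ and statistics-block coalescing parameters only
              * exist on pre-5705 hardware; leaving their limits at zero makes
              * the checks below reject any non-zero request on newer chips.
              */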
8743         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8744                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8745                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8746                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8747                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8748         }
8749
8750         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8751             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8752             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8753             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8754             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8755             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8756             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8757             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8758             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8759             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8760                 return -EINVAL;
8761
8762         /* No rx interrupts will be generated if both are zero */
8763         if ((ec->rx_coalesce_usecs == 0) &&
8764             (ec->rx_max_coalesced_frames == 0))
8765                 return -EINVAL;
8766
8767         /* No tx interrupts will be generated if both are zero */
8768         if ((ec->tx_coalesce_usecs == 0) &&
8769             (ec->tx_max_coalesced_frames == 0))
8770                 return -EINVAL;
8771
8772         /* Only copy relevant parameters, ignore all others. */
8773         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8774         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8775         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8776         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8777         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8778         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8779         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8780         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8781         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8782
8783         if (netif_running(dev)) {
8784                 tg3_full_lock(tp, 0);
8785                 __tg3_set_coalesce(tp, &tp->coal);
8786                 tg3_full_unlock(tp);
8787         }
8788         return 0;
8789 }
8790
8791 static struct ethtool_ops tg3_ethtool_ops = {
8792         .get_settings           = tg3_get_settings,
8793         .set_settings           = tg3_set_settings,
8794         .get_drvinfo            = tg3_get_drvinfo,
8795         .get_regs_len           = tg3_get_regs_len,
8796         .get_regs               = tg3_get_regs,
8797         .get_wol                = tg3_get_wol,
8798         .set_wol                = tg3_set_wol,
8799         .get_msglevel           = tg3_get_msglevel,
8800         .set_msglevel           = tg3_set_msglevel,
8801         .nway_reset             = tg3_nway_reset,
8802         .get_link               = ethtool_op_get_link,
8803         .get_eeprom_len         = tg3_get_eeprom_len,
8804         .get_eeprom             = tg3_get_eeprom,
8805         .set_eeprom             = tg3_set_eeprom,
8806         .get_ringparam          = tg3_get_ringparam,
8807         .set_ringparam          = tg3_set_ringparam,
8808         .get_pauseparam         = tg3_get_pauseparam,
8809         .set_pauseparam         = tg3_set_pauseparam,
8810         .get_rx_csum            = tg3_get_rx_csum,
8811         .set_rx_csum            = tg3_set_rx_csum,
8812         .get_tx_csum            = ethtool_op_get_tx_csum,
8813         .set_tx_csum            = tg3_set_tx_csum,
8814         .get_sg                 = ethtool_op_get_sg,
8815         .set_sg                 = ethtool_op_set_sg,
8816 #if TG3_TSO_SUPPORT != 0
8817         .get_tso                = ethtool_op_get_tso,
8818         .set_tso                = tg3_set_tso,
8819 #endif
8820         .self_test_count        = tg3_get_test_count,
8821         .self_test              = tg3_self_test,
8822         .get_strings            = tg3_get_strings,
8823         .phys_id                = tg3_phys_id,
8824         .get_stats_count        = tg3_get_stats_count,
8825         .get_ethtool_stats      = tg3_get_ethtool_stats,
8826         .get_coalesce           = tg3_get_coalesce,
8827         .set_coalesce           = tg3_set_coalesce,
8828         .get_perm_addr          = ethtool_op_get_perm_addr,
8829 };
8830
8831 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8832 {
8833         u32 cursize, val, magic;
8834
8835         tp->nvram_size = EEPROM_CHIP_SIZE;
8836
8837         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8838                 return;
8839
8840         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8841                 return;
8842
8843         /*
8844          * Size the chip by reading offsets at increasing powers of two.
8845          * When we encounter our validation signature, we know the addressing
8846          * has wrapped around, and thus have our chip size.
8847          */
8848         cursize = 0x10;
8849
8850         while (cursize < tp->nvram_size) {
8851                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8852                         return;
8853
8854                 if (val == magic)
8855                         break;
8856
8857                 cursize <<= 1;
8858         }
8859
8860         tp->nvram_size = cursize;
8861 }
8862                 
8863 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8864 {
8865         u32 val;
8866
8867         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8868                 return;
8869
8870         /* Selfboot format */
8871         if (val != TG3_EEPROM_MAGIC) {
8872                 tg3_get_eeprom_size(tp);
8873                 return;
8874         }
8875
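             /* Legacy images record their size, in kilobytes, in the upper 16
              * bits of the word at offset 0xf0; fall back to 128KB if that
              * word is zero or unreadable.
              */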
8876         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8877                 if (val != 0) {
8878                         tp->nvram_size = (val >> 16) * 1024;
8879                         return;
8880                 }
8881         }
8882         tp->nvram_size = 0x20000;
8883 }
8884
8885 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8886 {
8887         u32 nvcfg1;
8888
8889         nvcfg1 = tr32(NVRAM_CFG1);
8890         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8891                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8892         }
8893         else {
8894                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8895                 tw32(NVRAM_CFG1, nvcfg1);
8896         }
8897
8898         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8899             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8900                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8901                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8902                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8903                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8904                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8905                                 break;
8906                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8907                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8908                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8909                                 break;
8910                         case FLASH_VENDOR_ATMEL_EEPROM:
8911                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8912                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8913                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8914                                 break;
8915                         case FLASH_VENDOR_ST:
8916                                 tp->nvram_jedecnum = JEDEC_ST;
8917                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8918                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8919                                 break;
8920                         case FLASH_VENDOR_SAIFUN:
8921                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8922                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8923                                 break;
8924                         case FLASH_VENDOR_SST_SMALL:
8925                         case FLASH_VENDOR_SST_LARGE:
8926                                 tp->nvram_jedecnum = JEDEC_SST;
8927                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8928                                 break;
8929                 }
8930         }
8931         else {
8932                 tp->nvram_jedecnum = JEDEC_ATMEL;
8933                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8934                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8935         }
8936 }
8937
8938 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8939 {
8940         u32 nvcfg1;
8941
8942         nvcfg1 = tr32(NVRAM_CFG1);
8943
8944         /* NVRAM protection for TPM */
8945         if (nvcfg1 & (1 << 27))
8946                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8947
8948         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8949                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8950                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8951                         tp->nvram_jedecnum = JEDEC_ATMEL;
8952                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8953                         break;
8954                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8955                         tp->nvram_jedecnum = JEDEC_ATMEL;
8956                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8957                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8958                         break;
8959                 case FLASH_5752VENDOR_ST_M45PE10:
8960                 case FLASH_5752VENDOR_ST_M45PE20:
8961                 case FLASH_5752VENDOR_ST_M45PE40:
8962                         tp->nvram_jedecnum = JEDEC_ST;
8963                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8964                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8965                         break;
8966         }
8967
8968         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8969                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8970                         case FLASH_5752PAGE_SIZE_256:
8971                                 tp->nvram_pagesize = 256;
8972                                 break;
8973                         case FLASH_5752PAGE_SIZE_512:
8974                                 tp->nvram_pagesize = 512;
8975                                 break;
8976                         case FLASH_5752PAGE_SIZE_1K:
8977                                 tp->nvram_pagesize = 1024;
8978                                 break;
8979                         case FLASH_5752PAGE_SIZE_2K:
8980                                 tp->nvram_pagesize = 2048;
8981                                 break;
8982                         case FLASH_5752PAGE_SIZE_4K:
8983                                 tp->nvram_pagesize = 4096;
8984                                 break;
8985                         case FLASH_5752PAGE_SIZE_264:
8986                                 tp->nvram_pagesize = 264;
8987                                 break;
8988                 }
8989         }
8990         else {
8991                 /* For eeprom, set pagesize to maximum eeprom size */
8992                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8993
8994                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8995                 tw32(NVRAM_CFG1, nvcfg1);
8996         }
8997 }
8998
8999 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9000 {
9001         u32 nvcfg1;
9002
9003         nvcfg1 = tr32(NVRAM_CFG1);
9004
9005         /* NVRAM protection for TPM */
9006         if (nvcfg1 & (1 << 27))
9007                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9008
9009         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9010                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9011                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9012                         tp->nvram_jedecnum = JEDEC_ATMEL;
9013                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9014                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9015
9016                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9017                         tw32(NVRAM_CFG1, nvcfg1);
9018                         break;
9019                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9020                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9021                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9022                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9023                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9024                         tp->nvram_jedecnum = JEDEC_ATMEL;
9025                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9026                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9027                         tp->nvram_pagesize = 264;
9028                         break;
9029                 case FLASH_5752VENDOR_ST_M45PE10:
9030                 case FLASH_5752VENDOR_ST_M45PE20:
9031                 case FLASH_5752VENDOR_ST_M45PE40:
9032                         tp->nvram_jedecnum = JEDEC_ST;
9033                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9034                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9035                         tp->nvram_pagesize = 256;
9036                         break;
9037         }
9038 }
9039
9040 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9041 {
9042         u32 nvcfg1;
9043
9044         nvcfg1 = tr32(NVRAM_CFG1);
9045
9046         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9047                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9048                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9049                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9050                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9051                         tp->nvram_jedecnum = JEDEC_ATMEL;
9052                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9053                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9054
9055                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9056                         tw32(NVRAM_CFG1, nvcfg1);
9057                         break;
9058                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9059                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9060                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9061                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9062                         tp->nvram_jedecnum = JEDEC_ATMEL;
9063                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9064                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9065                         tp->nvram_pagesize = 264;
9066                         break;
9067                 case FLASH_5752VENDOR_ST_M45PE10:
9068                 case FLASH_5752VENDOR_ST_M45PE20:
9069                 case FLASH_5752VENDOR_ST_M45PE40:
9070                         tp->nvram_jedecnum = JEDEC_ST;
9071                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9072                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9073                         tp->nvram_pagesize = 256;
9074                         break;
9075         }
9076 }
9077
9078 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9079 static void __devinit tg3_nvram_init(struct tg3 *tp)
9080 {
9081         int j;
9082
9083         tw32_f(GRC_EEPROM_ADDR,
9084              (EEPROM_ADDR_FSM_RESET |
9085               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9086                EEPROM_ADDR_CLKPERD_SHIFT)));
9087
9088         /* XXX schedule_timeout() ... */
9089         for (j = 0; j < 100; j++)
9090                 udelay(10);
9091
9092         /* Enable seeprom accesses. */
9093         tw32_f(GRC_LOCAL_CTRL,
9094              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9095         udelay(100);
9096
9097         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9098             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9099                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9100
9101                 if (tg3_nvram_lock(tp)) {
9102                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9103                                "tg3_nvram_init failed.\n", tp->dev->name);
9104                         return;
9105                 }
9106                 tg3_enable_nvram_access(tp);
9107
9108                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9109                         tg3_get_5752_nvram_info(tp);
9110                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9111                         tg3_get_5755_nvram_info(tp);
9112                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9113                         tg3_get_5787_nvram_info(tp);
9114                 else
9115                         tg3_get_nvram_info(tp);
9116
9117                 tg3_get_nvram_size(tp);
9118
9119                 tg3_disable_nvram_access(tp);
9120                 tg3_nvram_unlock(tp);
9121
9122         } else {
9123                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9124
9125                 tg3_get_eeprom_size(tp);
9126         }
9127 }
9128
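     /* Read one 32-bit word from the legacy serial EEPROM through the
      * GRC_EEPROM_ADDR/DATA registers, polling for completion.  The offset
      * must be dword aligned and within EEPROM_ADDR_ADDR_MASK.
      */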
9129 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9130                                         u32 offset, u32 *val)
9131 {
9132         u32 tmp;
9133         int i;
9134
9135         if (offset > EEPROM_ADDR_ADDR_MASK ||
9136             (offset % 4) != 0)
9137                 return -EINVAL;
9138
9139         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9140                                         EEPROM_ADDR_DEVID_MASK |
9141                                         EEPROM_ADDR_READ);
9142         tw32(GRC_EEPROM_ADDR,
9143              tmp |
9144              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9145              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9146               EEPROM_ADDR_ADDR_MASK) |
9147              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9148
9149         for (i = 0; i < 10000; i++) {
9150                 tmp = tr32(GRC_EEPROM_ADDR);
9151
9152                 if (tmp & EEPROM_ADDR_COMPLETE)
9153                         break;
9154                 udelay(100);
9155         }
9156         if (!(tmp & EEPROM_ADDR_COMPLETE))
9157                 return -EBUSY;
9158
9159         *val = tr32(GRC_EEPROM_DATA);
9160         return 0;
9161 }
9162
9163 #define NVRAM_CMD_TIMEOUT 10000
9164
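     /* Issue a command to the NVRAM controller and poll for NVRAM_CMD_DONE,
      * giving up after NVRAM_CMD_TIMEOUT polls spaced 10 usec apart
      * (roughly 100 msec worst case).
      */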
9165 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9166 {
9167         int i;
9168
9169         tw32(NVRAM_CMD, nvram_cmd);
9170         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9171                 udelay(10);
9172                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9173                         udelay(10);
9174                         break;
9175                 }
9176         }
9177         if (i == NVRAM_CMD_TIMEOUT) {
9178                 return -EBUSY;
9179         }
9180         return 0;
9181 }
9182
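     /* Map a linear NVRAM byte offset to the page-based physical address
      * used by buffered Atmel AT45DB0X1B flash parts.  For example, with a
      * 264-byte page an offset of 1000 lands in page 3 at byte 208, encoded
      * as (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208.  Other configurations use
      * the offset unchanged.
      */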
9183 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9184 {
9185         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9186             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9187             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9188             (tp->nvram_jedecnum == JEDEC_ATMEL))
9189
9190                 addr = ((addr / tp->nvram_pagesize) <<
9191                         ATMEL_AT45DB0X1B_PAGE_POS) +
9192                        (addr % tp->nvram_pagesize);
9193
9194         return addr;
9195 }
9196
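     /* Inverse of tg3_nvram_phys_addr(): convert a page-based Atmel physical
      * address back into a linear NVRAM byte offset.
      */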
9197 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9198 {
9199         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9200             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9201             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9202             (tp->nvram_jedecnum == JEDEC_ATMEL))
9203
9204                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9205                         tp->nvram_pagesize) +
9206                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9207
9208         return addr;
9209 }
9210
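     /* Read one 32-bit word of NVRAM at a linear offset, going through the
      * legacy EEPROM interface when the chip has no NVRAM controller and
      * through a locked NVRAM read command otherwise.
      */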
9211 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9212 {
9213         int ret;
9214
9215         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9216                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9217
9218         offset = tg3_nvram_phys_addr(tp, offset);
9219
9220         if (offset > NVRAM_ADDR_MSK)
9221                 return -EINVAL;
9222
9223         ret = tg3_nvram_lock(tp);
9224         if (ret)
9225                 return ret;
9226
9227         tg3_enable_nvram_access(tp);
9228
9229         tw32(NVRAM_ADDR, offset);
9230         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9231                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9232
9233         if (ret == 0)
9234                 *val = swab32(tr32(NVRAM_RDDATA));
9235
9236         tg3_disable_nvram_access(tp);
9237
9238         tg3_nvram_unlock(tp);
9239
9240         return ret;
9241 }
9242
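     /* Read one NVRAM word and return it byte-swapped. */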
9243 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9244 {
9245         int err;
9246         u32 tmp;
9247
9248         err = tg3_nvram_read(tp, offset, &tmp);
9249         *val = swab32(tmp);
9250         return err;
9251 }
9252
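     /* Write a block to the legacy serial EEPROM one 32-bit word at a time,
      * polling EEPROM_ADDR_COMPLETE after each word.
      */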
9253 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9254                                     u32 offset, u32 len, u8 *buf)
9255 {
9256         int i, j, rc = 0;
9257         u32 val;
9258
9259         for (i = 0; i < len; i += 4) {
9260                 u32 addr, data;
9261
9262                 addr = offset + i;
9263
9264                 memcpy(&data, buf + i, 4);
9265
9266                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9267
9268                 val = tr32(GRC_EEPROM_ADDR);
9269                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9270
9271                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9272                         EEPROM_ADDR_READ);
9273                 tw32(GRC_EEPROM_ADDR, val |
9274                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9275                         (addr & EEPROM_ADDR_ADDR_MASK) |
9276                         EEPROM_ADDR_START |
9277                         EEPROM_ADDR_WRITE);
9278                 
9279                 for (j = 0; j < 10000; j++) {
9280                         val = tr32(GRC_EEPROM_ADDR);
9281
9282                         if (val & EEPROM_ADDR_COMPLETE)
9283                                 break;
9284                         udelay(100);
9285                 }
9286                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9287                         rc = -EBUSY;
9288                         break;
9289                 }
9290         }
9291
9292         return rc;
9293 }
9294
9295 /* offset and length are dword aligned */
9296 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9297                 u8 *buf)
9298 {
9299         int ret = 0;
9300         u32 pagesize = tp->nvram_pagesize;
9301         u32 pagemask = pagesize - 1;
9302         u32 nvram_cmd;
9303         u8 *tmp;
9304
9305         tmp = kmalloc(pagesize, GFP_KERNEL);
9306         if (tmp == NULL)
9307                 return -ENOMEM;
9308
9309         while (len) {
9310                 int j;
9311                 u32 phy_addr, page_off, size;
9312
9313                 phy_addr = offset & ~pagemask;
9314         
9315                 for (j = 0; j < pagesize; j += 4) {
9316                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9317                                                 (u32 *) (tmp + j))))
9318                                 break;
9319                 }
9320                 if (ret)
9321                         break;
9322
9323                 page_off = offset & pagemask;
9324                 size = pagesize;
9325                 if (len < size)
9326                         size = len;
9327
9328                 len -= size;
9329
9330                 memcpy(tmp + page_off, buf, size);
9331
9332                 offset = offset + (pagesize - page_off);
9333
9334                 tg3_enable_nvram_access(tp);
9335
9336                 /*
9337                  * Before we can erase the flash page, we need
9338                  * to issue a special "write enable" command.
9339                  */
9340                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9341
9342                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9343                         break;
9344
9345                 /* Erase the target page */
9346                 tw32(NVRAM_ADDR, phy_addr);
9347
9348                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9349                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9350
9351                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9352                         break;
9353
9354                 /* Issue another write enable to start the write. */
9355                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9356
9357                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9358                         break;
9359
9360                 for (j = 0; j < pagesize; j += 4) {
9361                         u32 data;
9362
9363                         data = *((u32 *) (tmp + j));
9364                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9365
9366                         tw32(NVRAM_ADDR, phy_addr + j);
9367
9368                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9369                                 NVRAM_CMD_WR;
9370
9371                         if (j == 0)
9372                                 nvram_cmd |= NVRAM_CMD_FIRST;
9373                         else if (j == (pagesize - 4))
9374                                 nvram_cmd |= NVRAM_CMD_LAST;
9375
9376                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9377                                 break;
9378                 }
9379                 if (ret)
9380                         break;
9381         }
9382
9383         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9384         tg3_nvram_exec_cmd(tp, nvram_cmd);
9385
9386         kfree(tmp);
9387
9388         return ret;
9389 }
9390
9391 /* offset and length are dword aligned */
9392 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9393                 u8 *buf)
9394 {
9395         int i, ret = 0;
9396
9397         for (i = 0; i < len; i += 4, offset += 4) {
9398                 u32 data, page_off, phy_addr, nvram_cmd;
9399
9400                 memcpy(&data, buf + i, 4);
9401                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9402
9403                 page_off = offset % tp->nvram_pagesize;
9404
9405                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9406
9407                 tw32(NVRAM_ADDR, phy_addr);
9408
9409                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9410
9411                 if ((page_off == 0) || (i == 0))
9412                         nvram_cmd |= NVRAM_CMD_FIRST;
9413                 if (page_off == (tp->nvram_pagesize - 4))
9414                         nvram_cmd |= NVRAM_CMD_LAST;
9415
9416                 if (i == (len - 4))
9417                         nvram_cmd |= NVRAM_CMD_LAST;
9418
9419                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9420                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9421                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9422                     (tp->nvram_jedecnum == JEDEC_ST) &&
9423                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9424
9425                         if ((ret = tg3_nvram_exec_cmd(tp,
9426                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9427                                 NVRAM_CMD_DONE)))
9428
9429                                 break;
9430                 }
9431                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9432                         /* We always do complete word writes to eeprom. */
9433                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9434                 }
9435
9436                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9437                         break;
9438         }
9439         return ret;
9440 }
9441
9442 /* offset and length are dword aligned */
9443 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9444 {
9445         int ret;
9446
9447         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9448                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9449                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9450                 udelay(40);
9451         }
9452
9453         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9454                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9455         }
9456         else {
9457                 u32 grc_mode;
9458
9459                 ret = tg3_nvram_lock(tp);
9460                 if (ret)
9461                         return ret;
9462
9463                 tg3_enable_nvram_access(tp);
9464                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9465                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9466                         tw32(NVRAM_WRITE1, 0x406);
9467
9468                 grc_mode = tr32(GRC_MODE);
9469                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9470
9471                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9472                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9473
9474                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9475                                 buf);
9476                 }
9477                 else {
9478                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9479                                 buf);
9480                 }
9481
9482                 grc_mode = tr32(GRC_MODE);
9483                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9484
9485                 tg3_disable_nvram_access(tp);
9486                 tg3_nvram_unlock(tp);
9487         }
9488
9489         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9490                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9491                 udelay(40);
9492         }
9493
9494         return ret;
9495 }
9496
9497 struct subsys_tbl_ent {
9498         u16 subsys_vendor, subsys_devid;
9499         u32 phy_id;
9500 };
9501
9502 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9503         /* Broadcom boards. */
9504         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9505         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9506         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9507         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9508         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9509         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9510         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9511         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9512         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9513         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9514         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9515
9516         /* 3com boards. */
9517         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9518         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9519         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9520         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9521         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9522
9523         /* DELL boards. */
9524         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9525         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9526         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9527         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9528
9529         /* Compaq boards. */
9530         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9531         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9532         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9533         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9534         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9535
9536         /* IBM boards. */
9537         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9538 };
9539
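     /* Match the board's PCI subsystem vendor/device IDs against
      * subsys_id_to_phy_id[] to find a hard-coded PHY ID for boards whose
      * NVRAM carries no usable PHY information.
      */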
9540 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9541 {
9542         int i;
9543
9544         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9545                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9546                      tp->pdev->subsystem_vendor) &&
9547                     (subsys_id_to_phy_id[i].subsys_devid ==
9548                      tp->pdev->subsystem_device))
9549                         return &subsys_id_to_phy_id[i];
9550         }
9551         return NULL;
9552 }
9553
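     /* Read back the configuration the bootcode left in NIC SRAM (PHY ID,
      * LED mode, ASF/WOL and eeprom write-protect settings), after making
      * sure the chip is in D0 and the memory arbiter is enabled.
      */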
9554 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9555 {
9556         u32 val;
9557         u16 pmcsr;
9558
9559         /* On some early chips the SRAM cannot be accessed in D3hot state,
9560          * so we need to make sure we're in D0.
9561          */
9562         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9563         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9564         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9565         msleep(1);
9566
9567         /* Make sure register accesses (indirect or otherwise)
9568          * will function correctly.
9569          */
9570         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9571                                tp->misc_host_ctrl);
9572
9573         /* The memory arbiter has to be enabled in order for SRAM accesses
9574          * to succeed.  Normally on powerup the tg3 chip firmware will make
9575          * sure it is enabled, but other entities such as system netboot
9576          * code might disable it.
9577          */
9578         val = tr32(MEMARB_MODE);
9579         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9580
9581         tp->phy_id = PHY_ID_INVALID;
9582         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9583
9584         /* Assume an onboard device by default.  */
9585         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9586
9587         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9588         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9589                 u32 nic_cfg, led_cfg;
9590                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9591                 int eeprom_phy_serdes = 0;
9592
9593                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9594                 tp->nic_sram_data_cfg = nic_cfg;
9595
9596                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9597                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9598                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9599                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9600                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9601                     (ver > 0) && (ver < 0x100))
9602                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9603
9604                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9605                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9606                         eeprom_phy_serdes = 1;
9607
9608                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9609                 if (nic_phy_id != 0) {
9610                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9611                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9612
9613                         eeprom_phy_id  = (id1 >> 16) << 10;
9614                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9615                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9616                 } else
9617                         eeprom_phy_id = 0;
9618
9619                 tp->phy_id = eeprom_phy_id;
9620                 if (eeprom_phy_serdes) {
9621                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9622                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9623                         else
9624                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9625                 }
9626
9627                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9628                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9629                                     SHASTA_EXT_LED_MODE_MASK);
9630                 else
9631                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9632
9633                 switch (led_cfg) {
9634                 default:
9635                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9636                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9637                         break;
9638
9639                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9640                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9641                         break;
9642
9643                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9644                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9645
9646                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9647                          * read on some older 5700/5701 bootcode.
9648                          */
9649                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9650                             ASIC_REV_5700 ||
9651                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9652                             ASIC_REV_5701)
9653                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9654
9655                         break;
9656
9657                 case SHASTA_EXT_LED_SHARED:
9658                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9659                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9660                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9661                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9662                                                  LED_CTRL_MODE_PHY_2);
9663                         break;
9664
9665                 case SHASTA_EXT_LED_MAC:
9666                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9667                         break;
9668
9669                 case SHASTA_EXT_LED_COMBO:
9670                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9671                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9672                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9673                                                  LED_CTRL_MODE_PHY_2);
9674                         break;
9675
9676                 }
9677
9678                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9679                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9680                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9681                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9682
9683                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9684                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9685                 else
9686                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9687
9688                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9689                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9690                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9691                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9692                 }
9693                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9694                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9695
9696                 if (cfg2 & (1 << 17))
9697                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9698
9699                 /* serdes signal pre-emphasis in register 0x590 is set by
9700                  * the bootcode if bit 18 is set. */
9701                 if (cfg2 & (1 << 18))
9702                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9703         }
9704 }
9705
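     /* Determine the PHY ID, preferring the MII ID registers (unless ASF
      * firmware owns the PHY) and falling back to the eeprom-provided value
      * or the subsystem ID table; then reset the PHY and set up a default
      * advertisement where appropriate.
      */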
9706 static int __devinit tg3_phy_probe(struct tg3 *tp)
9707 {
9708         u32 hw_phy_id_1, hw_phy_id_2;
9709         u32 hw_phy_id, hw_phy_id_masked;
9710         int err;
9711
9712         /* Reading the PHY ID register can conflict with ASF
9713          * firmware access to the PHY hardware.
9714          */
9715         err = 0;
9716         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9717                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9718         } else {
9719                 /* Now read the physical PHY_ID from the chip and verify
9720                  * that it is sane.  If it doesn't look good, we fall back
9721                  * to the PHY ID found in the eeprom area and, failing
9722                  * that, the hard-coded subsys device table.
9723                  */
9724                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9725                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9726
9727                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9728                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9729                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9730
9731                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9732         }
9733
9734         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9735                 tp->phy_id = hw_phy_id;
9736                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9737                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9738                 else
9739                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9740         } else {
9741                 if (tp->phy_id != PHY_ID_INVALID) {
9742                         /* Do nothing, phy ID already set up in
9743                          * tg3_get_eeprom_hw_cfg().
9744                          */
9745                 } else {
9746                         struct subsys_tbl_ent *p;
9747
9748                         /* No eeprom signature?  Try the hardcoded
9749                          * subsys device table.
9750                          */
9751                         p = lookup_by_subsys(tp);
9752                         if (!p)
9753                                 return -ENODEV;
9754
9755                         tp->phy_id = p->phy_id;
9756                         if (!tp->phy_id ||
9757                             tp->phy_id == PHY_ID_BCM8002)
9758                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9759                 }
9760         }
9761
9762         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9763             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9764                 u32 bmsr, adv_reg, tg3_ctrl;
9765
9766                 tg3_readphy(tp, MII_BMSR, &bmsr);
9767                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9768                     (bmsr & BMSR_LSTATUS))
9769                         goto skip_phy_reset;
9770                     
9771                 err = tg3_phy_reset(tp);
9772                 if (err)
9773                         return err;
9774
9775                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9776                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9777                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9778                 tg3_ctrl = 0;
9779                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9780                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9781                                     MII_TG3_CTRL_ADV_1000_FULL);
9782                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9783                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9784                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9785                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9786                 }
9787
9788                 if (!tg3_copper_is_advertising_all(tp)) {
9789                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9790
9791                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9792                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9793
9794                         tg3_writephy(tp, MII_BMCR,
9795                                      BMCR_ANENABLE | BMCR_ANRESTART);
9796                 }
9797                 tg3_phy_set_wirespeed(tp);
9798
9799                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9800                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9801                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9802         }
9803
9804 skip_phy_reset:
9805         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9806                 err = tg3_init_5401phy_dsp(tp);
9807                 if (err)
9808                         return err;
9809         }
9810
9811         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9812                 err = tg3_init_5401phy_dsp(tp);
9813         }
9814
9815         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9816                 tp->link_config.advertising =
9817                         (ADVERTISED_1000baseT_Half |
9818                          ADVERTISED_1000baseT_Full |
9819                          ADVERTISED_Autoneg |
9820                          ADVERTISED_FIBRE);
9821         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9822                 tp->link_config.advertising &=
9823                         ~(ADVERTISED_1000baseT_Half |
9824                           ADVERTISED_1000baseT_Full);
9825
9826         return err;
9827 }
9828
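     /* Extract the board part number from the VPD area, reading the VPD
      * either out of NVRAM or through the PCI VPD capability, and fall back
      * to "none" when no part number can be found.
      */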
9829 static void __devinit tg3_read_partno(struct tg3 *tp)
9830 {
9831         unsigned char vpd_data[256];
9832         int i;
9833         u32 magic;
9834
9835         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9836                 goto out_not_found;
9837
9838         if (magic == TG3_EEPROM_MAGIC) {
9839                 for (i = 0; i < 256; i += 4) {
9840                         u32 tmp;
9841
9842                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9843                                 goto out_not_found;
9844
9845                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9846                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9847                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9848                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9849                 }
9850         } else {
9851                 int vpd_cap;
9852
9853                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9854                 for (i = 0; i < 256; i += 4) {
9855                         u32 tmp, j = 0;
9856                         u16 tmp16;
9857
9858                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9859                                               i);
9860                         while (j++ < 100) {
9861                                 pci_read_config_word(tp->pdev, vpd_cap +
9862                                                      PCI_VPD_ADDR, &tmp16);
9863                                 if (tmp16 & 0x8000)
9864                                         break;
9865                                 msleep(1);
9866                         }
9867                         if (!(tmp16 & 0x8000))
9868                                 goto out_not_found;
9869
9870                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9871                                               &tmp);
9872                         tmp = cpu_to_le32(tmp);
9873                         memcpy(&vpd_data[i], &tmp, 4);
9874                 }
9875         }
9876
9877         /* Now parse and find the part number. */
9878         for (i = 0; i < 256; ) {
9879                 unsigned char val = vpd_data[i];
9880                 int block_end;
9881
9882                 if (val == 0x82 || val == 0x91) {
9883                         i = (i + 3 +
9884                              (vpd_data[i + 1] +
9885                               (vpd_data[i + 2] << 8)));
9886                         continue;
9887                 }
9888
9889                 if (val != 0x90)
9890                         goto out_not_found;
9891
9892                 block_end = (i + 3 +
9893                              (vpd_data[i + 1] +
9894                               (vpd_data[i + 2] << 8)));
9895                 i += 3;
9896                 while (i < block_end) {
9897                         if (vpd_data[i + 0] == 'P' &&
9898                             vpd_data[i + 1] == 'N') {
9899                                 int partno_len = vpd_data[i + 2];
9900
9901                                 if (partno_len > 24)
9902                                         goto out_not_found;
9903
9904                                 memcpy(tp->board_part_number,
9905                                        &vpd_data[i + 3],
9906                                        partno_len);
9907
9908                                 /* Success. */
9909                                 return;
9910                         }
                             /* Skip to the next keyword: 2-byte name, 1-byte length, then data. */
                             i += 3 + vpd_data[i + 2];
9911                 }
9912
9913                 /* Part number not found. */
9914                 goto out_not_found;
9915         }
9916
9917 out_not_found:
9918         strcpy(tp->board_part_number, "none");
9919 }
9920
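     /* Read the bootcode firmware version string out of NVRAM into
      * tp->fw_ver, if the image provides one.
      */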
9921 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9922 {
9923         u32 val, offset, start;
9924
9925         if (tg3_nvram_read_swab(tp, 0, &val))
9926                 return;
9927
9928         if (val != TG3_EEPROM_MAGIC)
9929                 return;
9930
9931         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9932             tg3_nvram_read_swab(tp, 0x4, &start))
9933                 return;
9934
9935         offset = tg3_nvram_logical_addr(tp, offset);
9936         if (tg3_nvram_read_swab(tp, offset, &val))
9937                 return;
9938
9939         if ((val & 0xfc000000) == 0x0c000000) {
9940                 u32 ver_offset, addr;
9941                 int i;
9942
9943                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9944                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9945                         return;
9946
9947                 if (val != 0)
9948                         return;
9949
9950                 addr = offset + ver_offset - start;
9951                 for (i = 0; i < 16; i += 4) {
9952                         if (tg3_nvram_read(tp, addr + i, &val))
9953                                 return;
9954
9955                         val = cpu_to_le32(val);
9956                         memcpy(tp->fw_ver + i, &val, 4);
9957                 }
9958         }
9959 }
9960
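     /* Probe chip revision, bus type and the long list of per-chip quirks,
      * pick the register/mailbox access methods, and set the feature and
      * workaround flags the rest of the driver depends on.
      */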
9961 static int __devinit tg3_get_invariants(struct tg3 *tp)
9962 {
9963         static struct pci_device_id write_reorder_chipsets[] = {
9964                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9965                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9966                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9967                              PCI_DEVICE_ID_VIA_8385_0) },
9968                 { },
9969         };
9970         u32 misc_ctrl_reg;
9971         u32 cacheline_sz_reg;
9972         u32 pci_state_reg, grc_misc_cfg;
9973         u32 val;
9974         u16 pci_cmd;
9975         int err;
9976
9977         /* Force memory write invalidate off.  If we leave it on,
9978          * then on 5700_BX chips we have to enable a workaround.
9979          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9980          * to match the cacheline size.  The Broadcom driver has this
9981          * workaround but turns MWI off all the time and so never uses
9982          * it.  This seems to suggest that the workaround is insufficient.
9983          */
9984         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9985         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9986         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9987
9988         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9989          * has the register indirect write enable bit set before
9990          * we try to access any of the MMIO registers.  It is also
9991          * critical that the PCI-X hw workaround situation is decided
9992          * before that as well.
9993          */
9994         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9995                               &misc_ctrl_reg);
9996
9997         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9998                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9999
10000         /* Wrong chip ID in 5752 A0. This code can be removed later
10001          * as A0 is not in production.
10002          */
10003         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10004                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10005
10006         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10007          * we need to disable memory and use config. cycles
10008          * only to access all registers. The 5702/03 chips
10009          * can mistakenly decode the special cycles from the
10010          * ICH chipsets as memory write cycles, causing corruption
10011          * of register and memory space. Only certain ICH bridges
10012          * will drive special cycles with non-zero data during the
10013          * address phase which can fall within the 5703's address
10014          * range. This is not an ICH bug as the PCI spec allows
10015          * non-zero address during special cycles. However, only
10016          * these ICH bridges are known to drive non-zero addresses
10017          * during special cycles.
10018          *
10019          * Since special cycles do not cross PCI bridges, we only
10020          * enable this workaround if the 5703 is on the secondary
10021          * bus of these ICH bridges.
10022          */
10023         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10024             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10025                 static struct tg3_dev_id {
10026                         u32     vendor;
10027                         u32     device;
10028                         u32     rev;
10029                 } ich_chipsets[] = {
10030                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10031                           PCI_ANY_ID },
10032                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10033                           PCI_ANY_ID },
10034                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10035                           0xa },
10036                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10037                           PCI_ANY_ID },
10038                         { },
10039                 };
10040                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10041                 struct pci_dev *bridge = NULL;
10042
10043                 while (pci_id->vendor != 0) {
10044                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10045                                                 bridge);
10046                         if (!bridge) {
10047                                 pci_id++;
10048                                 continue;
10049                         }
10050                         if (pci_id->rev != PCI_ANY_ID) {
10051                                 u8 rev;
10052
10053                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10054                                                      &rev);
10055                                 if (rev > pci_id->rev)
10056                                         continue;
10057                         }
10058                         if (bridge->subordinate &&
10059                             (bridge->subordinate->number ==
10060                              tp->pdev->bus->number)) {
10061
10062                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10063                                 pci_dev_put(bridge);
10064                                 break;
10065                         }
10066                 }
10067         }
10068
10069         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10070          * DMA addresses > 40-bit. This bridge may have additional
10071          * 57xx devices behind it in some 4-port NIC designs, for example.
10072          * Any tg3 device found behind the bridge will also need the 40-bit
10073          * DMA workaround.
10074          */
10075         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10076             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10077                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10078                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10079                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10080         }
10081         else {
10082                 struct pci_dev *bridge = NULL;
10083
10084                 do {
10085                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10086                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10087                                                 bridge);
10088                         if (bridge && bridge->subordinate &&
10089                             (bridge->subordinate->number <=
10090                              tp->pdev->bus->number) &&
10091                             (bridge->subordinate->subordinate >=
10092                              tp->pdev->bus->number)) {
10093                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10094                                 pci_dev_put(bridge);
10095                                 break;
10096                         }
10097                 } while (bridge);
10098         }
10099
10100         /* Initialize misc host control in PCI block. */
10101         tp->misc_host_ctrl |= (misc_ctrl_reg &
10102                                MISC_HOST_CTRL_CHIPREV);
10103         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10104                                tp->misc_host_ctrl);
10105
10106         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10107                               &cacheline_sz_reg);
10108
10109         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10110         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10111         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10112         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10113
10114         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10115             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10116             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10117             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10118             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10119                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10120
10121         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10122             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10123                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10124
10125         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10126                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10127                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10128                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10129                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10130                 } else
10131                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10132         }
10133
10134         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10135             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10136             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10137             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10138             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10139                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10140
10141         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10142                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10143
10144         /* If we have an AMD 762 or VIA K8T800 chipset, write
10145          * reordering to the mailbox registers done by the host
10146          * controller can cause major troubles.  We read back from
10147          * every mailbox register write to force the writes to be
10148          * posted to the chip in order.
10149          */
10150         if (pci_dev_present(write_reorder_chipsets) &&
10151             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10152                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10153
10154         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10155             tp->pci_lat_timer < 64) {
10156                 tp->pci_lat_timer = 64;
10157
10158                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10159                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10160                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10161                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10162
10163                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10164                                        cacheline_sz_reg);
10165         }
10166
10167         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10168                               &pci_state_reg);
10169
10170         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10171                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10172
10173                 /* If this is a 5700 BX chipset, and we are in PCI-X
10174                  * mode, enable the register write workaround.
10175                  *
10176                  * The workaround is to use indirect register accesses
10177                  * for all chip writes not to mailbox registers.
10178                  */
10179                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10180                         u32 pm_reg;
10181                         u16 pci_cmd;
10182
10183                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10184
10185                          /* The chip can have its power management PCI config
10186                          * space registers clobbered due to this bug.
10187                          * So explicitly force the chip into D0 here.
10188                          */
10189                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10190                                               &pm_reg);
10191                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10192                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10193                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10194                                                pm_reg);
10195
10196                         /* Also, force SERR#/PERR# in PCI command. */
10197                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10198                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10199                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10200                 }
10201         }
10202
10203         /* 5700 BX chips need to have their TX producer index mailboxes
10204          * written twice to work around a bug.
10205          */
10206         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10207                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10208
10209         /* Back to back register writes can cause problems on this chip;
10210          * the workaround is to read back all reg writes except those to
10211          * mailbox regs.  See tg3_write_indirect_reg32().
10212          *
10213          * PCI Express 5750_A0 rev chips need this workaround too.
10214          */
10215         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10216             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10217              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10218                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10219
10220         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10221                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10222         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10223                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10224
10225         /* Chip-specific fixup from Broadcom driver */
10226         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10227             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10228                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10229                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10230         }
10231
10232         /* Default fast path register access methods */
10233         tp->read32 = tg3_read32;
10234         tp->write32 = tg3_write32;
10235         tp->read32_mbox = tg3_read32;
10236         tp->write32_mbox = tg3_write32;
10237         tp->write32_tx_mbox = tg3_write32;
10238         tp->write32_rx_mbox = tg3_write32;
10239
10240         /* Various workaround register access methods */
10241         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10242                 tp->write32 = tg3_write_indirect_reg32;
10243         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10244                 tp->write32 = tg3_write_flush_reg32;
10245
10246         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10247             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10248                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10249                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10250                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10251         }
10252
10253         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10254                 tp->read32 = tg3_read_indirect_reg32;
10255                 tp->write32 = tg3_write_indirect_reg32;
10256                 tp->read32_mbox = tg3_read_indirect_mbox;
10257                 tp->write32_mbox = tg3_write_indirect_mbox;
10258                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10259                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10260
10261                 iounmap(tp->regs);
10262                 tp->regs = NULL;
10263
10264                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10265                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10266                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10267         }
10268
10269         if (tp->write32 == tg3_write_indirect_reg32 ||
10270             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10271              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10272               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10273                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10274
10275         /* Get eeprom hw config before calling tg3_set_power_state().
10276          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10277          * determined before calling tg3_set_power_state() so that
10278          * we know whether or not to switch out of Vaux power.
10279          * When the flag is set, it means that GPIO1 is used for eeprom
10280          * write protect and also implies that it is a LOM where GPIOs
10281          * are not used to switch power.
10282          */ 
10283         tg3_get_eeprom_hw_cfg(tp);
10284
10285         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10286          * GPIO1 driven high will bring 5700's external PHY out of reset.
10287          * It is also used as eeprom write protect on LOMs.
10288          */
10289         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10290         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10291             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10292                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10293                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10294         /* Unused GPIO3 must be driven as output on 5752 because there
10295          * are no pull-up resistors on unused GPIO pins.
10296          */
10297         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10298                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10299
10300         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10301                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10302
10303         /* Force the chip into D0. */
10304         err = tg3_set_power_state(tp, PCI_D0);
10305         if (err) {
10306                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10307                        pci_name(tp->pdev));
10308                 return err;
10309         }
10310
10311         /* 5700 B0 chips do not support checksumming correctly due
10312          * to hardware bugs.
10313          */
10314         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10315                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10316
10317         /* Derive initial jumbo mode from MTU assigned in
10318          * ether_setup() via the alloc_etherdev() call
10319          */
10320         if (tp->dev->mtu > ETH_DATA_LEN &&
10321             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10322                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10323
10324         /* Determine WakeOnLan speed to use. */
10325         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10326             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10327             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10328             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10329                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10330         } else {
10331                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10332         }
10333
10334         /* A few boards don't want Ethernet@WireSpeed phy feature */
10335         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10336             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10337              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10338              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10339             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10340                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10341
10342         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10343             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10344                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10345         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10346                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10347
10348         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10349                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10350                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10351                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10352                 else
10353                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10354         }
10355
10356         tp->coalesce_mode = 0;
10357         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10358             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10359                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10360
10361         /* Initialize MAC MI mode, polling disabled. */
10362         tw32_f(MAC_MI_MODE, tp->mi_mode);
10363         udelay(80);
10364
10365         /* Initialize data/descriptor byte/word swapping. */
10366         val = tr32(GRC_MODE);
10367         val &= GRC_MODE_HOST_STACKUP;
10368         tw32(GRC_MODE, val | tp->grc_mode);
10369
10370         tg3_switch_clocks(tp);
10371
10372         /* Clear this out for sanity. */
10373         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10374
10375         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10376                               &pci_state_reg);
10377         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10378             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10379                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10380
10381                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10382                     chiprevid == CHIPREV_ID_5701_B0 ||
10383                     chiprevid == CHIPREV_ID_5701_B2 ||
10384                     chiprevid == CHIPREV_ID_5701_B5) {
10385                         void __iomem *sram_base;
10386
10387                         /* Write some dummy words into the SRAM status block
10388                          * area, see if it reads back correctly.  If the return
10389                          * value is bad, force enable the PCIX workaround.
10390                          */
10391                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10392
10393                         writel(0x00000000, sram_base);
10394                         writel(0x00000000, sram_base + 4);
10395                         writel(0xffffffff, sram_base + 4);
10396                         if (readl(sram_base) != 0x00000000)
10397                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10398                 }
10399         }
10400
10401         udelay(50);
10402         tg3_nvram_init(tp);
10403
10404         grc_misc_cfg = tr32(GRC_MISC_CFG);
10405         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10406
10407         /* Broadcom's driver says that CIOBE multisplit has a bug */
10408 #if 0
10409         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10410             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10411                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10412                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10413         }
10414 #endif
10415         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10416             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10417              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10418                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10419
10420         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10421             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10422                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10423         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10424                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10425                                       HOSTCC_MODE_CLRTICK_TXBD);
10426
10427                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10428                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10429                                        tp->misc_host_ctrl);
10430         }
10431
10432         /* these are limited to 10/100 only */
10433         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10434              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10435             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10436              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10437              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10438               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10439               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10440             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10441              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10442               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10443                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10444
10445         err = tg3_phy_probe(tp);
10446         if (err) {
10447                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10448                        pci_name(tp->pdev), err);
10449                 /* ... but do not return immediately ... */
10450         }
10451
10452         tg3_read_partno(tp);
10453         tg3_read_fw_ver(tp);
10454
10455         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10456                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10457         } else {
10458                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10459                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10460                 else
10461                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10462         }
10463
10464         /* 5700 {AX,BX} chips have a broken status block link
10465          * change bit implementation, so we must use the
10466          * status register in those cases.
10467          */
10468         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10469                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10470         else
10471                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10472
10473         /* The led_ctrl is set during tg3_phy_probe; here we might
10474          * have to force the link status polling mechanism based
10475          * upon subsystem IDs.
10476          */
10477         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10478             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10479                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10480                                   TG3_FLAG_USE_LINKCHG_REG);
10481         }
10482
10483         /* For all SERDES we poll the MAC status register. */
10484         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10485                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10486         else
10487                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10488
10489         /* All chips other than the 5755 and 5787 can get confused if TX
10490          * buffers straddle the 4GB address boundary in some cases.
10491          */
10492         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10493             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10494                 tp->dev->hard_start_xmit = tg3_start_xmit;
10495         else
10496                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10497
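              /* A 2 byte receive offset keeps the IP header 4-byte aligned
               * behind the 14 byte Ethernet header; the 5701 in PCI-X mode
               * apparently cannot DMA reliably to such an unaligned address,
               * so it gets no offset.
               */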
10498         tp->rx_offset = 2;
10499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10500             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10501                 tp->rx_offset = 0;
10502
10503         /* By default, disable wake-on-lan.  User can change this
10504          * using ETHTOOL_SWOL.
10505          */
10506         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10507
10508         return err;
10509 }
10510
10511 #ifdef CONFIG_SPARC64
10512 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10513 {
10514         struct net_device *dev = tp->dev;
10515         struct pci_dev *pdev = tp->pdev;
10516         struct pcidev_cookie *pcp = pdev->sysdata;
10517
10518         if (pcp != NULL) {
10519                 int node = pcp->prom_node;
10520
10521                 if (prom_getproplen(node, "local-mac-address") == 6) {
10522                         prom_getproperty(node, "local-mac-address",
10523                                          dev->dev_addr, 6);
10524                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10525                         return 0;
10526                 }
10527         }
10528         return -ENODEV;
10529 }
10530
10531 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10532 {
10533         struct net_device *dev = tp->dev;
10534
10535         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10536         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10537         return 0;
10538 }
10539 #endif
10540
10541 static int __devinit tg3_get_device_address(struct tg3 *tp)
10542 {
10543         struct net_device *dev = tp->dev;
10544         u32 hi, lo, mac_offset;
10545         int addr_ok = 0;
10546
10547 #ifdef CONFIG_SPARC64
10548         if (!tg3_get_macaddr_sparc(tp))
10549                 return 0;
10550 #endif
10551
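              /* The MAC address normally lives at NVRAM offset 0x7c; the
               * second MAC of a 5704/5780-class device (DUAL_MAC_CTRL_ID set)
               * reads it from offset 0xcc instead.
               */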
10552         mac_offset = 0x7c;
10553         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10554             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10555                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10556                         mac_offset = 0xcc;
10557                 if (tg3_nvram_lock(tp))
10558                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10559                 else
10560                         tg3_nvram_unlock(tp);
10561         }
10562
10563         /* First try to get it from MAC address mailbox. */
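              /* The upper 16 bits of the high mailbox word hold the ASCII
               * signature "HK" (0x484b), presumably written by the bootcode
               * to mark the mailbox contents as a valid MAC address.
               */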
10564         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10565         if ((hi >> 16) == 0x484b) {
10566                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10567                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10568
10569                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10570                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10571                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10572                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10573                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10574
10575                 /* Some old bootcode may report a 0 MAC address in SRAM */
10576                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10577         }
10578         if (!addr_ok) {
10579                 /* Next, try NVRAM. */
10580                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10581                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10582                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10583                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10584                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10585                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10586                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10587                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10588                 }
10589                 /* Finally just fetch it out of the MAC control regs. */
10590                 else {
10591                         hi = tr32(MAC_ADDR_0_HIGH);
10592                         lo = tr32(MAC_ADDR_0_LOW);
10593
10594                         dev->dev_addr[5] = lo & 0xff;
10595                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10596                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10597                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10598                         dev->dev_addr[1] = hi & 0xff;
10599                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10600                 }
10601         }
10602
10603         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10604 #ifdef CONFIG_SPARC64
10605                 if (!tg3_get_default_macaddr_sparc(tp))
10606                         return 0;
10607 #endif
10608                 return -EINVAL;
10609         }
10610         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10611         return 0;
10612 }
10613
10614 #define BOUNDARY_SINGLE_CACHELINE       1
10615 #define BOUNDARY_MULTI_CACHELINE        2
10616
10617 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10618 {
10619         int cacheline_size;
10620         u8 byte;
10621         int goal;
10622
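              /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the
               * multiply by 4; a value of 0 means it was never programmed,
               * in which case fall back to a conservative 1024 bytes.
               */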
10623         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10624         if (byte == 0)
10625                 cacheline_size = 1024;
10626         else
10627                 cacheline_size = (int) byte * 4;
10628
10629         /* On 5703 and later chips, the boundary bits have no
10630          * effect.
10631          */
10632         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10633             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10634             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10635                 goto out;
10636
10637 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10638         goal = BOUNDARY_MULTI_CACHELINE;
10639 #else
10640 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10641         goal = BOUNDARY_SINGLE_CACHELINE;
10642 #else
10643         goal = 0;
10644 #endif
10645 #endif
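              /* goal selects how aggressively DMA bursts may cross cache
               * lines on this architecture: stop at every line, allow
               * multiple lines, or (goal == 0) leave the chip's default
               * boundary setting untouched.
               */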
10646
10647         if (!goal)
10648                 goto out;
10649
10650         /* PCI controllers on most RISC systems tend to disconnect
10651          * when a device tries to burst across a cache-line boundary.
10652          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10653          *
10654          * Unfortunately, for PCI-E there are only limited
10655          * write-side controls for this, and thus for reads
10656          * we will still get the disconnects.  We'll also waste
10657          * these PCI cycles for both read and write for chips
10658          * other than 5700 and 5701 which do not implement the
10659          * boundary bits.
10660          */
10661         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10662             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10663                 switch (cacheline_size) {
10664                 case 16:
10665                 case 32:
10666                 case 64:
10667                 case 128:
10668                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10669                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10670                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10671                         } else {
10672                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10673                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10674                         }
10675                         break;
10676
10677                 case 256:
10678                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10679                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10680                         break;
10681
10682                 default:
10683                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10684                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10685                         break;
10686                 }
10687         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10688                 switch (cacheline_size) {
10689                 case 16:
10690                 case 32:
10691                 case 64:
10692                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10693                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10694                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10695                                 break;
10696                         }
10697                         /* fallthrough */
10698                 case 128:
10699                 default:
10700                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10701                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10702                         break;
10703                 }
10704         } else {
10705                 switch (cacheline_size) {
10706                 case 16:
10707                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10708                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10709                                         DMA_RWCTRL_WRITE_BNDRY_16);
10710                                 break;
10711                         }
10712                         /* fallthrough */
10713                 case 32:
10714                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10715                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10716                                         DMA_RWCTRL_WRITE_BNDRY_32);
10717                                 break;
10718                         }
10719                         /* fallthrough */
10720                 case 64:
10721                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10722                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10723                                         DMA_RWCTRL_WRITE_BNDRY_64);
10724                                 break;
10725                         }
10726                         /* fallthrough */
10727                 case 128:
10728                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10729                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10730                                         DMA_RWCTRL_WRITE_BNDRY_128);
10731                                 break;
10732                         }
10733                         /* fallthrough */
10734                 case 256:
10735                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10736                                 DMA_RWCTRL_WRITE_BNDRY_256);
10737                         break;
10738                 case 512:
10739                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10740                                 DMA_RWCTRL_WRITE_BNDRY_512);
10741                         break;
10742                 case 1024:
10743                 default:
10744                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10745                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10746                         break;
10747         }
10748         }
10749
10750 out:
10751         return val;
10752 }
10753
10754 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10755 {
10756         struct tg3_internal_buffer_desc test_desc;
10757         u32 sram_dma_descs;
10758         int i, ret;
10759
10760         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10761
10762         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10763         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10764         tw32(RDMAC_STATUS, 0);
10765         tw32(WDMAC_STATUS, 0);
10766
10767         tw32(BUFMGR_MODE, 0);
10768         tw32(FTQ_RESET, 0);
10769
10770         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10771         test_desc.addr_lo = buf_dma & 0xffffffff;
10772         test_desc.nic_mbuf = 0x00002100;
10773         test_desc.len = size;
10774
10775         /*
10776          * The HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10777          * the *second* time the tg3 driver was loaded after an
10778          * initial scan.
10779          *
10780          * Broadcom tells me:
10781          *   ...the DMA engine is connected to the GRC block and a DMA
10782          *   reset may affect the GRC block in some unpredictable way...
10783          *   The behavior of resets to individual blocks has not been tested.
10784          *
10785          * Broadcom noted the GRC reset will also reset all sub-components.
10786          */
10787         if (to_device) {
10788                 test_desc.cqid_sqid = (13 << 8) | 2;
10789
10790                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10791                 udelay(40);
10792         } else {
10793                 test_desc.cqid_sqid = (16 << 8) | 7;
10794
10795                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10796                 udelay(40);
10797         }
10798         test_desc.flags = 0x00000005;
10799
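              /* Copy the descriptor word by word into NIC SRAM through the
               * PCI memory window config registers, then close the window.
               */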
10800         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10801                 u32 val;
10802
10803                 val = *(((u32 *)&test_desc) + i);
10804                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10805                                        sram_dma_descs + (i * sizeof(u32)));
10806                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10807         }
10808         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10809
10810         if (to_device) {
10811                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10812         } else {
10813                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10814         }
10815
10816         ret = -ENODEV;
10817         for (i = 0; i < 40; i++) {
10818                 u32 val;
10819
10820                 if (to_device)
10821                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10822                 else
10823                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10824                 if ((val & 0xffff) == sram_dma_descs) {
10825                         ret = 0;
10826                         break;
10827                 }
10828
10829                 udelay(100);
10830         }
10831
10832         return ret;
10833 }
10834
10835 #define TEST_BUFFER_SIZE        0x2000
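      /* 0x2000 bytes == 8 KB scratch buffer for the host <-> NIC round-trip
       * DMA test below.
       */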
10836
10837 static int __devinit tg3_test_dma(struct tg3 *tp)
10838 {
10839         dma_addr_t buf_dma;
10840         u32 *buf, saved_dma_rwctrl;
10841         int ret;
10842
10843         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10844         if (!buf) {
10845                 ret = -ENOMEM;
10846                 goto out_nofree;
10847         }
10848
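              /* Seed dma_rwctrl with initial PCI read/write DMA command
               * values; tg3_calc_dma_bndry() and the bus-specific cases below
               * then add the boundary and watermark bits.
               */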
10849         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10850                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10851
10852         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10853
10854         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10855                 /* DMA read watermark not used on PCIE */
10856                 tp->dma_rwctrl |= 0x00180000;
10857         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10858                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10859                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10860                         tp->dma_rwctrl |= 0x003f0000;
10861                 else
10862                         tp->dma_rwctrl |= 0x003f000f;
10863         } else {
10864                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10865                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10866                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10867
10868                         /* If the 5704 is behind the EPB bridge, we can
10869                          * do the less restrictive ONE_DMA workaround for
10870                          * better performance.
10871                          */
10872                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10873                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10874                                 tp->dma_rwctrl |= 0x8000;
10875                         else if (ccval == 0x6 || ccval == 0x7)
10876                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10877
10878                         /* Set bit 23 to enable PCIX hw bug fix */
10879                         tp->dma_rwctrl |= 0x009f0000;
10880                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10881                         /* 5780 always in PCIX mode */
10882                         tp->dma_rwctrl |= 0x00144000;
10883                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10884                         /* 5714 always in PCIX mode */
10885                         tp->dma_rwctrl |= 0x00148000;
10886                 } else {
10887                         tp->dma_rwctrl |= 0x001b000f;
10888                 }
10889         }
10890
10891         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10892             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10893                 tp->dma_rwctrl &= 0xfffffff0;
10894
10895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10896             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10897                 /* Remove this if it causes problems for some boards. */
10898                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10899
10900                 /* On 5700/5701 chips, we need to set this bit.
10901                  * Otherwise the chip will issue cacheline transactions
10902                  * to streamable DMA memory with not all the byte
10903                  * enables turned on.  This is an error on several
10904                  * RISC PCI controllers, in particular sparc64.
10905                  *
10906                  * On 5703/5704 chips, this bit has been reassigned
10907                  * a different meaning.  In particular, it is used
10908                  * on those chips to enable a PCI-X workaround.
10909                  */
10910                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10911         }
10912
10913         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10914
10915 #if 0
10916         /* Unneeded, already done by tg3_get_invariants.  */
10917         tg3_switch_clocks(tp);
10918 #endif
10919
10920         ret = 0;
10921         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10922             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10923                 goto out;
10924
10925         /* It is best to perform the DMA test with the maximum write burst
10926          * size to expose the 5700/5701 write DMA bug.
10927          */
10928         saved_dma_rwctrl = tp->dma_rwctrl;
10929         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10930         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10931
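              /* The test: fill the buffer with a known pattern, DMA it to
               * NIC SRAM, DMA it back and compare.  On a mismatch, retry once
               * with the write boundary clamped to 16 bytes before giving up.
               */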
10932         while (1) {
10933                 u32 *p = buf, i;
10934
10935                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10936                         p[i] = i;
10937
10938                 /* Send the buffer to the chip. */
10939                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10940                 if (ret) {
10941                         printk(KERN_ERR "tg3_test_dma() Write to the buffer failed %d\n", ret);
10942                         break;
10943                 }
10944
10945 #if 0
10946                 /* validate data reached card RAM correctly. */
10947                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10948                         u32 val;
10949                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10950                         if (le32_to_cpu(val) != p[i]) {
10951                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10952                                 /* ret = -ENODEV here? */
10953                         }
10954                         p[i] = 0;
10955                 }
10956 #endif
10957                 /* Now read it back. */
10958                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10959                 if (ret) {
10960                         printk(KERN_ERR "tg3_test_dma() Read back of the buffer failed %d\n", ret);
10961
10962                         break;
10963                 }
10964
10965                 /* Verify it. */
10966                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10967                         if (p[i] == i)
10968                                 continue;
10969
10970                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10971                             DMA_RWCTRL_WRITE_BNDRY_16) {
10972                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10973                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10974                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10975                                 break;
10976                         } else {
10977                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10978                                 ret = -ENODEV;
10979                                 goto out;
10980                         }
10981                 }
10982
10983                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10984                         /* Success. */
10985                         ret = 0;
10986                         break;
10987                 }
10988         }
10989         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10990             DMA_RWCTRL_WRITE_BNDRY_16) {
10991                 static struct pci_device_id dma_wait_state_chipsets[] = {
10992                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10993                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10994                         { },
10995                 };
10996
10997                 /* DMA test passed without adjusting the DMA boundary;
10998                  * now look for chipsets that are known to expose the
10999                  * DMA bug without failing the test.
11000                  */
11001                 if (pci_dev_present(dma_wait_state_chipsets)) {
11002                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11003                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11004                 }
11005                 else
11006                         /* Safe to use the calculated DMA boundary. */
11007                         tp->dma_rwctrl = saved_dma_rwctrl;
11008
11009                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11010         }
11011
11012 out:
11013         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11014 out_nofree:
11015         return ret;
11016 }
11017
11018 static void __devinit tg3_init_link_config(struct tg3 *tp)
11019 {
11020         tp->link_config.advertising =
11021                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11022                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11023                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11024                  ADVERTISED_Autoneg | ADVERTISED_MII);
11025         tp->link_config.speed = SPEED_INVALID;
11026         tp->link_config.duplex = DUPLEX_INVALID;
11027         tp->link_config.autoneg = AUTONEG_ENABLE;
11028         tp->link_config.active_speed = SPEED_INVALID;
11029         tp->link_config.active_duplex = DUPLEX_INVALID;
11030         tp->link_config.phy_is_low_power = 0;
11031         tp->link_config.orig_speed = SPEED_INVALID;
11032         tp->link_config.orig_duplex = DUPLEX_INVALID;
11033         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11034 }
11035
11036 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11037 {
11038         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11039                 tp->bufmgr_config.mbuf_read_dma_low_water =
11040                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11041                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11042                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11043                 tp->bufmgr_config.mbuf_high_water =
11044                         DEFAULT_MB_HIGH_WATER_5705;
11045
11046                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11047                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11048                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11049                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11050                 tp->bufmgr_config.mbuf_high_water_jumbo =
11051                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11052         } else {
11053                 tp->bufmgr_config.mbuf_read_dma_low_water =
11054                         DEFAULT_MB_RDMA_LOW_WATER;
11055                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11056                         DEFAULT_MB_MACRX_LOW_WATER;
11057                 tp->bufmgr_config.mbuf_high_water =
11058                         DEFAULT_MB_HIGH_WATER;
11059
11060                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11061                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11062                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11063                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11064                 tp->bufmgr_config.mbuf_high_water_jumbo =
11065                         DEFAULT_MB_HIGH_WATER_JUMBO;
11066         }
11067
11068         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11069         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11070 }
11071
11072 static char * __devinit tg3_phy_string(struct tg3 *tp)
11073 {
11074         switch (tp->phy_id & PHY_ID_MASK) {
11075         case PHY_ID_BCM5400:    return "5400";
11076         case PHY_ID_BCM5401:    return "5401";
11077         case PHY_ID_BCM5411:    return "5411";
11078         case PHY_ID_BCM5701:    return "5701";
11079         case PHY_ID_BCM5703:    return "5703";
11080         case PHY_ID_BCM5704:    return "5704";
11081         case PHY_ID_BCM5705:    return "5705";
11082         case PHY_ID_BCM5750:    return "5750";
11083         case PHY_ID_BCM5752:    return "5752";
11084         case PHY_ID_BCM5714:    return "5714";
11085         case PHY_ID_BCM5780:    return "5780";
11086         case PHY_ID_BCM5755:    return "5755";
11087         case PHY_ID_BCM5787:    return "5787";
11088         case PHY_ID_BCM8002:    return "8002/serdes";
11089         case 0:                 return "serdes";
11090         default:                return "unknown";
11091         }
11092 }
11093
11094 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11095 {
11096         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11097                 strcpy(str, "PCI Express");
11098                 return str;
11099         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11100                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11101
11102                 strcpy(str, "PCIX:");
11103
11104                 if ((clock_ctrl == 7) ||
11105                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11106                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11107                         strcat(str, "133MHz");
11108                 else if (clock_ctrl == 0)
11109                         strcat(str, "33MHz");
11110                 else if (clock_ctrl == 2)
11111                         strcat(str, "50MHz");
11112                 else if (clock_ctrl == 4)
11113                         strcat(str, "66MHz");
11114                 else if (clock_ctrl == 6)
11115                         strcat(str, "100MHz");
11116         } else {
11117                 strcpy(str, "PCI:");
11118                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11119                         strcat(str, "66MHz");
11120                 else
11121                         strcat(str, "33MHz");
11122         }
11123         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11124                 strcat(str, ":32-bit");
11125         else
11126                 strcat(str, ":64-bit");
11127         return str;
11128 }
11129
11130 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11131 {
11132         struct pci_dev *peer;
11133         unsigned int func, devnr = tp->pdev->devfn & ~7;
11134
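              /* devfn packs the device number in bits 7:3 and the function
               * number in bits 2:0, so devfn & ~7 is function 0 of this slot;
               * scan all eight functions for the other port of a dual-port
               * device.
               */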
11135         for (func = 0; func < 8; func++) {
11136                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11137                 if (peer && peer != tp->pdev)
11138                         break;
11139                 pci_dev_put(peer);
11140         }
11141         /* 5704 can be configured in single-port mode, set peer to
11142          * tp->pdev in that case.
11143          */
11144         if (!peer) {
11145                 peer = tp->pdev;
11146                 return peer;
11147         }
11148
11149         /*
11150          * We don't need to keep the refcount elevated; there's no way
11151          * to remove one half of this device without removing the other
11152          */
11153         pci_dev_put(peer);
11154
11155         return peer;
11156 }
11157
11158 static void __devinit tg3_init_coal(struct tg3 *tp)
11159 {
11160         struct ethtool_coalesce *ec = &tp->coal;
11161
11162         memset(ec, 0, sizeof(*ec));
11163         ec->cmd = ETHTOOL_GCOALESCE;
11164         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11165         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11166         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11167         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11168         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11169         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11170         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11171         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11172         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11173
11174         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11175                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11176                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11177                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11178                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11179                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11180         }
11181
11182         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11183                 ec->rx_coalesce_usecs_irq = 0;
11184                 ec->tx_coalesce_usecs_irq = 0;
11185                 ec->stats_block_coalesce_usecs = 0;
11186         }
11187 }
11188
11189 static int __devinit tg3_init_one(struct pci_dev *pdev,
11190                                   const struct pci_device_id *ent)
11191 {
11192         static int tg3_version_printed = 0;
11193         unsigned long tg3reg_base, tg3reg_len;
11194         struct net_device *dev;
11195         struct tg3 *tp;
11196         int i, err, pm_cap;
11197         char str[40];
11198         u64 dma_mask, persist_dma_mask;
11199
11200         if (tg3_version_printed++ == 0)
11201                 printk(KERN_INFO "%s", version);
11202
11203         err = pci_enable_device(pdev);
11204         if (err) {
11205                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11206                        "aborting.\n");
11207                 return err;
11208         }
11209
11210         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11211                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11212                        "base address, aborting.\n");
11213                 err = -ENODEV;
11214                 goto err_out_disable_pdev;
11215         }
11216
11217         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11218         if (err) {
11219                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11220                        "aborting.\n");
11221                 goto err_out_disable_pdev;
11222         }
11223
11224         pci_set_master(pdev);
11225
11226         /* Find power-management capability. */
11227         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11228         if (pm_cap == 0) {
11229                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11230                        "aborting.\n");
11231                 err = -EIO;
11232                 goto err_out_free_res;
11233         }
11234
11235         tg3reg_base = pci_resource_start(pdev, 0);
11236         tg3reg_len = pci_resource_len(pdev, 0);
11237
11238         dev = alloc_etherdev(sizeof(*tp));
11239         if (!dev) {
11240                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11241                 err = -ENOMEM;
11242                 goto err_out_free_res;
11243         }
11244
11245         SET_MODULE_OWNER(dev);
11246         SET_NETDEV_DEV(dev, &pdev->dev);
11247
11248         dev->features |= NETIF_F_LLTX;
11249 #if TG3_VLAN_TAG_USED
11250         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11251         dev->vlan_rx_register = tg3_vlan_rx_register;
11252         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11253 #endif
11254
11255         tp = netdev_priv(dev);
11256         tp->pdev = pdev;
11257         tp->dev = dev;
11258         tp->pm_cap = pm_cap;
11259         tp->mac_mode = TG3_DEF_MAC_MODE;
11260         tp->rx_mode = TG3_DEF_RX_MODE;
11261         tp->tx_mode = TG3_DEF_TX_MODE;
11262         tp->mi_mode = MAC_MI_MODE_BASE;
11263         if (tg3_debug > 0)
11264                 tp->msg_enable = tg3_debug;
11265         else
11266                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11267
11268         /* The word/byte swap controls here control register access byte
11269          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11270          * setting below.
11271          */
11272         tp->misc_host_ctrl =
11273                 MISC_HOST_CTRL_MASK_PCI_INT |
11274                 MISC_HOST_CTRL_WORD_SWAP |
11275                 MISC_HOST_CTRL_INDIR_ACCESS |
11276                 MISC_HOST_CTRL_PCISTATE_RW;
11277
11278         /* The NONFRM (non-frame) byte/word swap controls take effect
11279          * on descriptor entries, anything which isn't packet data.
11280          *
11281          * The StrongARM chips on the board (one for tx, one for rx)
11282          * are running in big-endian mode.
11283          */
11284         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11285                         GRC_MODE_WSWAP_NONFRM_DATA);
11286 #ifdef __BIG_ENDIAN
11287         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11288 #endif
11289         spin_lock_init(&tp->lock);
11290         spin_lock_init(&tp->tx_lock);
11291         spin_lock_init(&tp->indirect_lock);
11292         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11293
11294         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11295         if (!tp->regs) {
11296                 printk(KERN_ERR PFX "Cannot map device registers, "
11297                        "aborting.\n");
11298                 err = -ENOMEM;
11299                 goto err_out_free_dev;
11300         }
11301
11302         tg3_init_link_config(tp);
11303
11304         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11305         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11306         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11307
11308         dev->open = tg3_open;
11309         dev->stop = tg3_close;
11310         dev->get_stats = tg3_get_stats;
11311         dev->set_multicast_list = tg3_set_rx_mode;
11312         dev->set_mac_address = tg3_set_mac_addr;
11313         dev->do_ioctl = tg3_ioctl;
11314         dev->tx_timeout = tg3_tx_timeout;
11315         dev->poll = tg3_poll;
11316         dev->ethtool_ops = &tg3_ethtool_ops;
11317         dev->weight = 64;
11318         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11319         dev->change_mtu = tg3_change_mtu;
11320         dev->irq = pdev->irq;
11321 #ifdef CONFIG_NET_POLL_CONTROLLER
11322         dev->poll_controller = tg3_poll_controller;
11323 #endif
11324
11325         err = tg3_get_invariants(tp);
11326         if (err) {
11327                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11328                        "aborting.\n");
11329                 goto err_out_iounmap;
11330         }
11331
11332         /* The EPB bridge inside 5714, 5715, and 5780 and any
11333          * device behind the EPB cannot support DMA addresses > 40-bit.
11334          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11335          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11336          * do DMA address check in tg3_start_xmit().
11337          */
11338         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11339                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11340         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11341                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11342 #ifdef CONFIG_HIGHMEM
11343                 dma_mask = DMA_64BIT_MASK;
11344 #endif
11345         } else
11346                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11347
11348         /* Configure DMA attributes. */
11349         if (dma_mask > DMA_32BIT_MASK) {
11350                 err = pci_set_dma_mask(pdev, dma_mask);
11351                 if (!err) {
11352                         dev->features |= NETIF_F_HIGHDMA;
11353                         err = pci_set_consistent_dma_mask(pdev,
11354                                                           persist_dma_mask);
11355                         if (err < 0) {
11356                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11357                                        "DMA for consistent allocations\n");
11358                                 goto err_out_iounmap;
11359                         }
11360                 }
11361         }
11362         if (err || dma_mask == DMA_32BIT_MASK) {
11363                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11364                 if (err) {
11365                         printk(KERN_ERR PFX "No usable DMA configuration, "
11366                                "aborting.\n");
11367                         goto err_out_iounmap;
11368                 }
11369         }
11370
11371         tg3_init_bufmgr_config(tp);
11372
11373 #if TG3_TSO_SUPPORT != 0
11374         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11375                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11376         }
11377         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11378             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11379             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11380             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11381                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11382         } else {
11383                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11384         }
11385
11386         /* TSO is on by default on chips that support hardware TSO.
11387          * Firmware TSO on older chips gives lower performance, so it
11388          * is off by default, but can be enabled using ethtool.
11389          */
11390         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11391                 dev->features |= NETIF_F_TSO;
11392
11393 #endif
11394
11395         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11396             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11397             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11398                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11399                 tp->rx_pending = 63;
11400         }
11401
11402         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11403             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11404                 tp->pdev_peer = tg3_find_peer(tp);
11405
11406         err = tg3_get_device_address(tp);
11407         if (err) {
11408                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11409                        "aborting.\n");
11410                 goto err_out_iounmap;
11411         }
11412
11413         /*
11414          * Reset the chip in case a UNDI or EFI boot driver did not shut it
11415          * down cleanly.  Otherwise the DMA self test will enable WDMAC and
11416          * we'll see (spurious) pending DMA on the PCI bus at that point.
11417          */
11418         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11419             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11420                 pci_save_state(tp->pdev);
11421                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11422                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11423         }
11424
11425         err = tg3_test_dma(tp);
11426         if (err) {
11427                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11428                 goto err_out_iounmap;
11429         }
11430
11431         /* Tigon3 can do ipv4 only... and some chips have buggy
11432          * checksumming.
11433          */
11434         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11435                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11436                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11437                         dev->features |= NETIF_F_HW_CSUM;
11438                 else
11439                         dev->features |= NETIF_F_IP_CSUM;
11440                 dev->features |= NETIF_F_SG;
11441                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11442         } else
11443                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11444
11445         /* flow control autonegotiation is default behavior */
11446         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11447
11448         tg3_init_coal(tp);
11449
11450         /* Now that we have fully setup the chip, save away a snapshot
11451          * of the PCI config space.  We need to restore this after
11452          * GRC_MISC_CFG core clock resets and some resume events.
11453          */
11454         pci_save_state(tp->pdev);
11455
11456         err = register_netdev(dev);
11457         if (err) {
11458                 printk(KERN_ERR PFX "Cannot register net device, "
11459                        "aborting.\n");
11460                 goto err_out_iounmap;
11461         }
11462
11463         pci_set_drvdata(pdev, dev);
11464
11465         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11466                dev->name,
11467                tp->board_part_number,
11468                tp->pci_chip_rev_id,
11469                tg3_phy_string(tp),
11470                tg3_bus_string(tp, str),
11471                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11472
11473         for (i = 0; i < 6; i++)
11474                 printk("%2.2x%c", dev->dev_addr[i],
11475                        i == 5 ? '\n' : ':');
11476
11477         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11478                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11479                "TSOcap[%d]\n",
11480                dev->name,
11481                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11482                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11483                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11484                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11485                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11486                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11487                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11488         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11489                dev->name, tp->dma_rwctrl,
11490                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11491                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11492
11493         netif_carrier_off(tp->dev);
11494
11495         return 0;
11496
11497 err_out_iounmap:
11498         if (tp->regs) {
11499                 iounmap(tp->regs);
11500                 tp->regs = NULL;
11501         }
11502
11503 err_out_free_dev:
11504         free_netdev(dev);
11505
11506 err_out_free_res:
11507         pci_release_regions(pdev);
11508
11509 err_out_disable_pdev:
11510         pci_disable_device(pdev);
11511         pci_set_drvdata(pdev, NULL);
11512         return err;
11513 }
11514
11515 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11516 {
11517         struct net_device *dev = pci_get_drvdata(pdev);
11518
11519         if (dev) {
11520                 struct tg3 *tp = netdev_priv(dev);
11521
11522                 flush_scheduled_work();
11523                 unregister_netdev(dev);
11524                 if (tp->regs) {
11525                         iounmap(tp->regs);
11526                         tp->regs = NULL;
11527                 }
11528                 free_netdev(dev);
11529                 pci_release_regions(pdev);
11530                 pci_disable_device(pdev);
11531                 pci_set_drvdata(pdev, NULL);
11532         }
11533 }
11534
11535 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11536 {
11537         struct net_device *dev = pci_get_drvdata(pdev);
11538         struct tg3 *tp = netdev_priv(dev);
11539         int err;
11540
11541         if (!netif_running(dev))
11542                 return 0;
11543
11544         flush_scheduled_work();
11545         tg3_netif_stop(tp);
11546
11547         del_timer_sync(&tp->timer);
11548
11549         tg3_full_lock(tp, 1);
11550         tg3_disable_ints(tp);
11551         tg3_full_unlock(tp);
11552
11553         netif_device_detach(dev);
11554
11555         tg3_full_lock(tp, 0);
11556         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11557         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11558         tg3_full_unlock(tp);
11559
11560         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11561         if (err) {
11562                 tg3_full_lock(tp, 0);
11563
11564                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11565                 tg3_init_hw(tp, 1);
11566
11567                 tp->timer.expires = jiffies + tp->timer_offset;
11568                 add_timer(&tp->timer);
11569
11570                 netif_device_attach(dev);
11571                 tg3_netif_start(tp);
11572
11573                 tg3_full_unlock(tp);
11574         }
11575
11576         return err;
11577 }
11578
11579 static int tg3_resume(struct pci_dev *pdev)
11580 {
11581         struct net_device *dev = pci_get_drvdata(pdev);
11582         struct tg3 *tp = netdev_priv(dev);
11583         int err;
11584
11585         if (!netif_running(dev))
11586                 return 0;
11587
11588         pci_restore_state(tp->pdev);
11589
11590         err = tg3_set_power_state(tp, PCI_D0);
11591         if (err)
11592                 return err;
11593
11594         netif_device_attach(dev);
11595
11596         tg3_full_lock(tp, 0);
11597
11598         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11599         tg3_init_hw(tp, 1);
11600
11601         tp->timer.expires = jiffies + tp->timer_offset;
11602         add_timer(&tp->timer);
11603
11604         tg3_netif_start(tp);
11605
11606         tg3_full_unlock(tp);
11607
11608         return 0;
11609 }
11610
11611 static struct pci_driver tg3_driver = {
11612         .name           = DRV_MODULE_NAME,
11613         .id_table       = tg3_pci_tbl,
11614         .probe          = tg3_init_one,
11615         .remove         = __devexit_p(tg3_remove_one),
11616         .suspend        = tg3_suspend,
11617         .resume         = tg3_resume
11618 };
11619
11620 static int __init tg3_init(void)
11621 {
11622         return pci_module_init(&tg3_driver);
11623 }
11624
11625 static void __exit tg3_cleanup(void)
11626 {
11627         pci_unregister_driver(&tg3_driver);
11628 }
11629
11630 module_init(tg3_init);
11631 module_exit(tg3_cleanup);