[TG3]: use TG3_FLG2_57{05,50}_PLUS flags in tg3_get_invariants
[net-next-2.6.git] drivers/net/tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.25"
65 #define DRV_MODULE_RELDATE      "March 24, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself,
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
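
/* Illustrative sketch (not part of the original driver): because the ring
 * sizes above are powers of two, the wrap-around in NEXT_TX() is a single
 * AND instead of a hardware divide/modulo, which is what the comment above
 * TG3_RX_RCB_RING_SIZE() is getting at.  The helper name below is
 * hypothetical and exists only to show the equivalence.
 */
static inline u32 tg3_example_next_tx(u32 idx)
{
        /* same value as (idx + 1) % TG3_TX_RING_SIZE, but with no divide */
        return (idx + 1) & (TG3_TX_RING_SIZE - 1);
}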
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
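
/* Worked example (illustrative, not from the original source): with
 * TG3_TX_RING_SIZE = 512 and the default tx_pending of 511, TX_RING_GAP()
 * is 1 and TX_BUFFS_AVAIL() reports 511 free descriptors when tx_prod ==
 * tx_cons.  Once the queue has been stopped, it is not woken again until
 * more than TG3_TX_WAKEUP_THRESH (512 / 4 = 128) descriptors are free.
 */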
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { 0, }
232 };
233
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
235
236 static struct {
237         const char string[ETH_GSTRING_LEN];
238 } ethtool_stats_keys[TG3_NUM_STATS] = {
239         { "rx_octets" },
240         { "rx_fragments" },
241         { "rx_ucast_packets" },
242         { "rx_mcast_packets" },
243         { "rx_bcast_packets" },
244         { "rx_fcs_errors" },
245         { "rx_align_errors" },
246         { "rx_xon_pause_rcvd" },
247         { "rx_xoff_pause_rcvd" },
248         { "rx_mac_ctrl_rcvd" },
249         { "rx_xoff_entered" },
250         { "rx_frame_too_long_errors" },
251         { "rx_jabbers" },
252         { "rx_undersize_packets" },
253         { "rx_in_length_errors" },
254         { "rx_out_length_errors" },
255         { "rx_64_or_less_octet_packets" },
256         { "rx_65_to_127_octet_packets" },
257         { "rx_128_to_255_octet_packets" },
258         { "rx_256_to_511_octet_packets" },
259         { "rx_512_to_1023_octet_packets" },
260         { "rx_1024_to_1522_octet_packets" },
261         { "rx_1523_to_2047_octet_packets" },
262         { "rx_2048_to_4095_octet_packets" },
263         { "rx_4096_to_8191_octet_packets" },
264         { "rx_8192_to_9022_octet_packets" },
265
266         { "tx_octets" },
267         { "tx_collisions" },
268
269         { "tx_xon_sent" },
270         { "tx_xoff_sent" },
271         { "tx_flow_control" },
272         { "tx_mac_errors" },
273         { "tx_single_collisions" },
274         { "tx_mult_collisions" },
275         { "tx_deferred" },
276         { "tx_excessive_collisions" },
277         { "tx_late_collisions" },
278         { "tx_collide_2times" },
279         { "tx_collide_3times" },
280         { "tx_collide_4times" },
281         { "tx_collide_5times" },
282         { "tx_collide_6times" },
283         { "tx_collide_7times" },
284         { "tx_collide_8times" },
285         { "tx_collide_9times" },
286         { "tx_collide_10times" },
287         { "tx_collide_11times" },
288         { "tx_collide_12times" },
289         { "tx_collide_13times" },
290         { "tx_collide_14times" },
291         { "tx_collide_15times" },
292         { "tx_ucast_packets" },
293         { "tx_mcast_packets" },
294         { "tx_bcast_packets" },
295         { "tx_carrier_sense_errors" },
296         { "tx_discards" },
297         { "tx_errors" },
298
299         { "dma_writeq_full" },
300         { "dma_write_prioq_full" },
301         { "rxbds_empty" },
302         { "rx_discards" },
303         { "rx_errors" },
304         { "rx_threshold_hit" },
305
306         { "dma_readq_full" },
307         { "dma_read_prioq_full" },
308         { "tx_comp_queue_full" },
309
310         { "ring_set_send_prod_index" },
311         { "ring_status_update" },
312         { "nic_irqs" },
313         { "nic_avoided_irqs" },
314         { "nic_tx_threshold_hit" }
315 };
316
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318 {
319         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320                 unsigned long flags;
321
322                 spin_lock_irqsave(&tp->indirect_lock, flags);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326         } else {
327                 writel(val, tp->regs + off);
328                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329                         readl(tp->regs + off);
330         }
331 }
332
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334 {
335         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336                 unsigned long flags;
337
338                 spin_lock_irqsave(&tp->indirect_lock, flags);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         } else {
343                 void __iomem *dest = tp->regs + off;
344                 writel(val, dest);
345                 readl(dest);    /* always flush PCI write */
346         }
347 }
348
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350 {
351         void __iomem *mbox = tp->regs + off;
352         writel(val, mbox);
353         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354                 readl(mbox);
355 }
356
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         void __iomem *mbox = tp->regs + off;
360         writel(val, mbox);
361         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362                 writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364                 readl(mbox);
365 }
366
367 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
370
371 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg)               readl(tp->regs + (reg))
376 #define tr16(reg)               readw(tp->regs + (reg))
377 #define tr8(reg)                readb(tp->regs + (reg))
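
/* Usage sketch (illustrative, not from the original source): tw32() may
 * leave the write posted, while tw32_f() reads the register back so the
 * write is known to have reached the chip before any following delay:
 *
 *      tw32(MAC_EVENT, 0);              // plain (possibly posted) write
 *      tw32_f(MAC_MODE, tp->mac_mode);  // write + read-back flush
 *      udelay(40);                      // chip has observed the write
 */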
378
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393 {
394         unsigned long flags;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400         /* Always leave this as zero. */
401         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402         spin_unlock_irqrestore(&tp->indirect_lock, flags);
403 }
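
/* Usage sketch (illustrative, not from the original source): the memory
 * window above gives the host access to NIC SRAM through PCI config space,
 * e.g. for the firmware mailbox handshake:
 *
 *      u32 val;
 *
 *      tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
 *      tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 */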
404
405 static void tg3_disable_ints(struct tg3 *tp)
406 {
407         tw32(TG3PCI_MISC_HOST_CTRL,
408              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
409         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
410         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
411 }
412
413 static inline void tg3_cond_int(struct tg3 *tp)
414 {
415         if (tp->hw_status->status & SD_STATUS_UPDATED)
416                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
417 }
418
419 static void tg3_enable_ints(struct tg3 *tp)
420 {
421         tw32(TG3PCI_MISC_HOST_CTRL,
422              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
424         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
425
426         tg3_cond_int(tp);
427 }
428
429 /* tg3_restart_ints
430  *  similar to tg3_enable_ints, but it can return without flushing the
431  *  PIO write which reenables interrupts
432  */
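/* (Illustrative note, not in the original: tg3_enable_ints() flushes the
 * unmask with a readl(), so the interrupt enable has reached the chip
 * before it returns; the mmiowb() below only orders the posted mailbox
 * write against later MMIO writes and does not wait for the device to
 * see it.)
 */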
433 static void tg3_restart_ints(struct tg3 *tp)
434 {
435         tw32(TG3PCI_MISC_HOST_CTRL,
436                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
437         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
438         mmiowb();
439
440         tg3_cond_int(tp);
441 }
442
443 static inline void tg3_netif_stop(struct tg3 *tp)
444 {
445         netif_poll_disable(tp->dev);
446         netif_tx_disable(tp->dev);
447 }
448
449 static inline void tg3_netif_start(struct tg3 *tp)
450 {
451         netif_wake_queue(tp->dev);
452         /* NOTE: unconditional netif_wake_queue is only appropriate
453          * so long as all callers are assured to have free tx slots
454          * (such as after tg3_init_hw)
455          */
456         netif_poll_enable(tp->dev);
457         tg3_cond_int(tp);
458 }
459
460 static void tg3_switch_clocks(struct tg3 *tp)
461 {
462         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
463         u32 orig_clock_ctrl;
464
465         orig_clock_ctrl = clock_ctrl;
466         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
467                        CLOCK_CTRL_CLKRUN_OENABLE |
468                        0x1f);
469         tp->pci_clock_ctrl = clock_ctrl;
470
471         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
472                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
473                         tw32_f(TG3PCI_CLOCK_CTRL,
474                                clock_ctrl | CLOCK_CTRL_625_CORE);
475                         udelay(40);
476                 }
477         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
478                 tw32_f(TG3PCI_CLOCK_CTRL,
479                      clock_ctrl |
480                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
481                 udelay(40);
482                 tw32_f(TG3PCI_CLOCK_CTRL,
483                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
484                 udelay(40);
485         }
486         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
487         udelay(40);
488 }
489
490 #define PHY_BUSY_LOOPS  5000
491
492 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
493 {
494         u32 frame_val;
495         unsigned int loops;
496         int ret;
497
498         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
499                 tw32_f(MAC_MI_MODE,
500                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
501                 udelay(80);
502         }
503
504         *val = 0x0;
505
506         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
507                       MI_COM_PHY_ADDR_MASK);
508         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
509                       MI_COM_REG_ADDR_MASK);
510         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
511         
512         tw32_f(MAC_MI_COM, frame_val);
513
514         loops = PHY_BUSY_LOOPS;
515         while (loops != 0) {
516                 udelay(10);
517                 frame_val = tr32(MAC_MI_COM);
518
519                 if ((frame_val & MI_COM_BUSY) == 0) {
520                         udelay(5);
521                         frame_val = tr32(MAC_MI_COM);
522                         break;
523                 }
524                 loops -= 1;
525         }
526
527         ret = -EBUSY;
528         if (loops != 0) {
529                 *val = frame_val & MI_COM_DATA_MASK;
530                 ret = 0;
531         }
532
533         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
534                 tw32_f(MAC_MI_MODE, tp->mi_mode);
535                 udelay(80);
536         }
537
538         return ret;
539 }
540
541 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
542 {
543         u32 frame_val;
544         unsigned int loops;
545         int ret;
546
547         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
548                 tw32_f(MAC_MI_MODE,
549                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
550                 udelay(80);
551         }
552
553         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
554                       MI_COM_PHY_ADDR_MASK);
555         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
556                       MI_COM_REG_ADDR_MASK);
557         frame_val |= (val & MI_COM_DATA_MASK);
558         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
559         
560         tw32_f(MAC_MI_COM, frame_val);
561
562         loops = PHY_BUSY_LOOPS;
563         while (loops != 0) {
564                 udelay(10);
565                 frame_val = tr32(MAC_MI_COM);
566                 if ((frame_val & MI_COM_BUSY) == 0) {
567                         udelay(5);
568                         frame_val = tr32(MAC_MI_COM);
569                         break;
570                 }
571                 loops -= 1;
572         }
573
574         ret = -EBUSY;
575         if (loops != 0)
576                 ret = 0;
577
578         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
579                 tw32_f(MAC_MI_MODE, tp->mi_mode);
580                 udelay(80);
581         }
582
583         return ret;
584 }
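
/* Usage sketch (illustrative, not from the original source): the PHY
 * helpers return 0 on success, so read-modify-write tweaks throughout the
 * driver follow this pattern and simply skip the write if the read fails:
 *
 *      u32 reg;
 *
 *      if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg))
 *              tg3_writephy(tp, MII_TG3_EXT_CTRL,
 *                           reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
 */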
585
586 static void tg3_phy_set_wirespeed(struct tg3 *tp)
587 {
588         u32 val;
589
590         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
591                 return;
592
593         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
594             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
595                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
596                              (val | (1 << 15) | (1 << 4)));
597 }
598
599 static int tg3_bmcr_reset(struct tg3 *tp)
600 {
601         u32 phy_control;
602         int limit, err;
603
604         /* OK, reset it, and poll the BMCR_RESET bit until it
605          * clears or we time out.
606          */
607         phy_control = BMCR_RESET;
608         err = tg3_writephy(tp, MII_BMCR, phy_control);
609         if (err != 0)
610                 return -EBUSY;
611
612         limit = 5000;
613         while (limit--) {
614                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
615                 if (err != 0)
616                         return -EBUSY;
617
618                 if ((phy_control & BMCR_RESET) == 0) {
619                         udelay(40);
620                         break;
621                 }
622                 udelay(10);
623         }
624         if (limit <= 0)
625                 return -EBUSY;
626
627         return 0;
628 }
629
630 static int tg3_wait_macro_done(struct tg3 *tp)
631 {
632         int limit = 100;
633
634         while (limit--) {
635                 u32 tmp32;
636
637                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
638                         if ((tmp32 & 0x1000) == 0)
639                                 break;
640                 }
641         }
642         if (limit <= 0)
643                 return -EBUSY;
644
645         return 0;
646 }
647
648 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
649 {
650         static const u32 test_pat[4][6] = {
651         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
652         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
653         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
654         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
655         };
656         int chan;
657
658         for (chan = 0; chan < 4; chan++) {
659                 int i;
660
661                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
662                              (chan * 0x2000) | 0x0200);
663                 tg3_writephy(tp, 0x16, 0x0002);
664
665                 for (i = 0; i < 6; i++)
666                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
667                                      test_pat[chan][i]);
668
669                 tg3_writephy(tp, 0x16, 0x0202);
670                 if (tg3_wait_macro_done(tp)) {
671                         *resetp = 1;
672                         return -EBUSY;
673                 }
674
675                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
676                              (chan * 0x2000) | 0x0200);
677                 tg3_writephy(tp, 0x16, 0x0082);
678                 if (tg3_wait_macro_done(tp)) {
679                         *resetp = 1;
680                         return -EBUSY;
681                 }
682
683                 tg3_writephy(tp, 0x16, 0x0802);
684                 if (tg3_wait_macro_done(tp)) {
685                         *resetp = 1;
686                         return -EBUSY;
687                 }
688
689                 for (i = 0; i < 6; i += 2) {
690                         u32 low, high;
691
692                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
693                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
694                             tg3_wait_macro_done(tp)) {
695                                 *resetp = 1;
696                                 return -EBUSY;
697                         }
698                         low &= 0x7fff;
699                         high &= 0x000f;
700                         if (low != test_pat[chan][i] ||
701                             high != test_pat[chan][i+1]) {
702                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
703                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
704                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
705
706                                 return -EBUSY;
707                         }
708                 }
709         }
710
711         return 0;
712 }
713
714 static int tg3_phy_reset_chanpat(struct tg3 *tp)
715 {
716         int chan;
717
718         for (chan = 0; chan < 4; chan++) {
719                 int i;
720
721                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
722                              (chan * 0x2000) | 0x0200);
723                 tg3_writephy(tp, 0x16, 0x0002);
724                 for (i = 0; i < 6; i++)
725                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
726                 tg3_writephy(tp, 0x16, 0x0202);
727                 if (tg3_wait_macro_done(tp))
728                         return -EBUSY;
729         }
730
731         return 0;
732 }
733
734 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
735 {
736         u32 reg32, phy9_orig;
737         int retries, do_phy_reset, err;
738
739         retries = 10;
740         do_phy_reset = 1;
741         do {
742                 if (do_phy_reset) {
743                         err = tg3_bmcr_reset(tp);
744                         if (err)
745                                 return err;
746                         do_phy_reset = 0;
747                 }
748
749                 /* Disable transmitter and interrupt.  */
750                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
751                         continue;
752
753                 reg32 |= 0x3000;
754                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
755
756                 /* Set full-duplex, 1000 mbps.  */
757                 tg3_writephy(tp, MII_BMCR,
758                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
759
760                 /* Set to master mode.  */
761                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
762                         continue;
763
764                 tg3_writephy(tp, MII_TG3_CTRL,
765                              (MII_TG3_CTRL_AS_MASTER |
766                               MII_TG3_CTRL_ENABLE_AS_MASTER));
767
768                 /* Enable SM_DSP_CLOCK and 6dB.  */
769                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
770
771                 /* Block the PHY control access.  */
772                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
773                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
774
775                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
776                 if (!err)
777                         break;
778         } while (--retries);
779
780         err = tg3_phy_reset_chanpat(tp);
781         if (err)
782                 return err;
783
784         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
785         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
786
787         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
788         tg3_writephy(tp, 0x16, 0x0000);
789
790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
792                 /* Set Extended packet length bit for jumbo frames */
793                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
794         }
795         else {
796                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
797         }
798
799         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
800
801         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
802                 reg32 &= ~0x3000;
803                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
804         } else if (!err)
805                 err = -EBUSY;
806
807         return err;
808 }
809
810 /* This will unconditionally reset the tigon3 PHY; callers decide
811  * whether a reset is needed.
812  */
813 static int tg3_phy_reset(struct tg3 *tp)
814 {
815         u32 phy_status;
816         int err;
817
818         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
819         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
820         if (err != 0)
821                 return -EBUSY;
822
823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
825             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
826                 err = tg3_phy_reset_5703_4_5(tp);
827                 if (err)
828                         return err;
829                 goto out;
830         }
831
832         err = tg3_bmcr_reset(tp);
833         if (err)
834                 return err;
835
836 out:
837         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
838                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
839                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
840                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
841                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
842                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
843                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
844         }
845         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
846                 tg3_writephy(tp, 0x1c, 0x8d68);
847                 tg3_writephy(tp, 0x1c, 0x8d68);
848         }
849         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
850                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
851                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
852                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
853                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
854                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
855                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
856                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
857                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
858         }
859         /* Set Extended packet length bit (bit 14) on all chips that
860          * support jumbo frames. */
861         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
862                 /* Cannot do read-modify-write on 5401 */
863                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
864         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
865                 u32 phy_reg;
866
867                 /* Set bit 14 with read-modify-write to preserve other bits */
868                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
869                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
870                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
871         }
872
873         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
874          * jumbo frame transmission.
875          */
876         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
877                 u32 phy_reg;
878
879                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
880                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
881                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
882         }
883
884         tg3_phy_set_wirespeed(tp);
885         return 0;
886 }
887
888 static void tg3_frob_aux_power(struct tg3 *tp)
889 {
890         struct tg3 *tp_peer = tp;
891
892         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
893                 return;
894
895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
896                 tp_peer = pci_get_drvdata(tp->pdev_peer);
897                 if (!tp_peer)
898                         BUG();
899         }
900
901
902         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
903             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
904                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
905                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
906                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
907                              (GRC_LCLCTRL_GPIO_OE0 |
908                               GRC_LCLCTRL_GPIO_OE1 |
909                               GRC_LCLCTRL_GPIO_OE2 |
910                               GRC_LCLCTRL_GPIO_OUTPUT0 |
911                               GRC_LCLCTRL_GPIO_OUTPUT1));
912                         udelay(100);
913                 } else {
914                         u32 no_gpio2;
915                         u32 grc_local_ctrl;
916
917                         if (tp_peer != tp &&
918                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
919                                 return;
920
921                         /* On 5753 and variants, GPIO2 cannot be used. */
922                         no_gpio2 = tp->nic_sram_data_cfg &
923                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
924
925                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
926                                          GRC_LCLCTRL_GPIO_OE1 |
927                                          GRC_LCLCTRL_GPIO_OE2 |
928                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
929                                          GRC_LCLCTRL_GPIO_OUTPUT2;
930                         if (no_gpio2) {
931                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
932                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
933                         }
934                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
935                                                 grc_local_ctrl);
936                         udelay(100);
937
938                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
939
940                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
941                                                 grc_local_ctrl);
942                         udelay(100);
943
944                         if (!no_gpio2) {
945                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
946                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
947                                        grc_local_ctrl);
948                                 udelay(100);
949                         }
950                 }
951         } else {
952                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
953                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
954                         if (tp_peer != tp &&
955                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
956                                 return;
957
958                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
959                              (GRC_LCLCTRL_GPIO_OE1 |
960                               GRC_LCLCTRL_GPIO_OUTPUT1));
961                         udelay(100);
962
963                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
964                              (GRC_LCLCTRL_GPIO_OE1));
965                         udelay(100);
966
967                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
968                              (GRC_LCLCTRL_GPIO_OE1 |
969                               GRC_LCLCTRL_GPIO_OUTPUT1));
970                         udelay(100);
971                 }
972         }
973 }
974
975 static int tg3_setup_phy(struct tg3 *, int);
976
977 #define RESET_KIND_SHUTDOWN     0
978 #define RESET_KIND_INIT         1
979 #define RESET_KIND_SUSPEND      2
980
981 static void tg3_write_sig_post_reset(struct tg3 *, int);
982 static int tg3_halt_cpu(struct tg3 *, u32);
983
984 static int tg3_set_power_state(struct tg3 *tp, int state)
985 {
986         u32 misc_host_ctrl;
987         u16 power_control, power_caps;
988         int pm = tp->pm_cap;
989
990         /* Make sure register accesses (indirect or otherwise)
991          * will function correctly.
992          */
993         pci_write_config_dword(tp->pdev,
994                                TG3PCI_MISC_HOST_CTRL,
995                                tp->misc_host_ctrl);
996
997         pci_read_config_word(tp->pdev,
998                              pm + PCI_PM_CTRL,
999                              &power_control);
1000         power_control |= PCI_PM_CTRL_PME_STATUS;
1001         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1002         switch (state) {
1003         case 0:
1004                 power_control |= 0;
1005                 pci_write_config_word(tp->pdev,
1006                                       pm + PCI_PM_CTRL,
1007                                       power_control);
1008                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1009                 udelay(100);
1010
1011                 return 0;
1012
1013         case 1:
1014                 power_control |= 1;
1015                 break;
1016
1017         case 2:
1018                 power_control |= 2;
1019                 break;
1020
1021         case 3:
1022                 power_control |= 3;
1023                 break;
1024
1025         default:
1026                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1027                        "requested.\n",
1028                        tp->dev->name, state);
1029                 return -EINVAL;
1030         };
1031
1032         power_control |= PCI_PM_CTRL_PME_ENABLE;
1033
1034         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1035         tw32(TG3PCI_MISC_HOST_CTRL,
1036              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1037
1038         if (tp->link_config.phy_is_low_power == 0) {
1039                 tp->link_config.phy_is_low_power = 1;
1040                 tp->link_config.orig_speed = tp->link_config.speed;
1041                 tp->link_config.orig_duplex = tp->link_config.duplex;
1042                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1043         }
1044
1045         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1046                 tp->link_config.speed = SPEED_10;
1047                 tp->link_config.duplex = DUPLEX_HALF;
1048                 tp->link_config.autoneg = AUTONEG_ENABLE;
1049                 tg3_setup_phy(tp, 0);
1050         }
1051
1052         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1053
1054         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1055                 u32 mac_mode;
1056
1057                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1058                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1059                         udelay(40);
1060
1061                         mac_mode = MAC_MODE_PORT_MODE_MII;
1062
1063                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1064                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1065                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1066                 } else {
1067                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1068                 }
1069
1070                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1071                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1072
1073                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1074                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1075                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1076
1077                 tw32_f(MAC_MODE, mac_mode);
1078                 udelay(100);
1079
1080                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1081                 udelay(10);
1082         }
1083
1084         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1085             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1086              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1087                 u32 base_val;
1088
1089                 base_val = tp->pci_clock_ctrl;
1090                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1091                              CLOCK_CTRL_TXCLK_DISABLE);
1092
1093                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1094                      CLOCK_CTRL_ALTCLK |
1095                      CLOCK_CTRL_PWRDOWN_PLL133);
1096                 udelay(40);
1097         } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1098                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1099                 u32 newbits1, newbits2;
1100
1101                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1102                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1103                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1104                                     CLOCK_CTRL_TXCLK_DISABLE |
1105                                     CLOCK_CTRL_ALTCLK);
1106                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1107                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1108                         newbits1 = CLOCK_CTRL_625_CORE;
1109                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1110                 } else {
1111                         newbits1 = CLOCK_CTRL_ALTCLK;
1112                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1113                 }
1114
1115                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1116                 udelay(40);
1117
1118                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1119                 udelay(40);
1120
1121                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1122                         u32 newbits3;
1123
1124                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1125                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1126                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1127                                             CLOCK_CTRL_TXCLK_DISABLE |
1128                                             CLOCK_CTRL_44MHZ_CORE);
1129                         } else {
1130                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1131                         }
1132
1133                         tw32_f(TG3PCI_CLOCK_CTRL,
1134                                          tp->pci_clock_ctrl | newbits3);
1135                         udelay(40);
1136                 }
1137         }
1138
1139         tg3_frob_aux_power(tp);
1140
1141         /* Workaround for unstable PLL clock */
1142         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1143             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1144                 u32 val = tr32(0x7d00);
1145
1146                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1147                 tw32(0x7d00, val);
1148                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1149                         tg3_halt_cpu(tp, RX_CPU_BASE);
1150         }
1151
1152         /* Finally, set the new power state. */
1153         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1154
1155         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1156
1157         return 0;
1158 }
1159
1160 static void tg3_link_report(struct tg3 *tp)
1161 {
1162         if (!netif_carrier_ok(tp->dev)) {
1163                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1164         } else {
1165                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1166                        tp->dev->name,
1167                        (tp->link_config.active_speed == SPEED_1000 ?
1168                         1000 :
1169                         (tp->link_config.active_speed == SPEED_100 ?
1170                          100 : 10)),
1171                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1172                         "full" : "half"));
1173
1174                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1175                        "%s for RX.\n",
1176                        tp->dev->name,
1177                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1178                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1179         }
1180 }
1181
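/* Pause (flow control) resolution as implemented by tg3_setup_flow_control()
 * below, summarized (illustrative note, not in the original):
 *
 *      local PAUSE,      remote PAUSE          -> RX and TX pause
 *      local PAUSE+ASYM, remote ASYM only      -> RX pause only
 *      local ASYM only,  remote PAUSE+ASYM     -> TX pause only
 *      anything else                           -> no pause frames
 *
 * This resolution only applies when TG3_FLAG_PAUSE_AUTONEG is set; otherwise
 * the existing TG3_FLAG_RX_PAUSE/TG3_FLAG_TX_PAUSE bits are left as-is.
 */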
1182 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1183 {
1184         u32 new_tg3_flags = 0;
1185         u32 old_rx_mode = tp->rx_mode;
1186         u32 old_tx_mode = tp->tx_mode;
1187
1188         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1189                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1190                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1191                                 if (remote_adv & LPA_PAUSE_CAP)
1192                                         new_tg3_flags |=
1193                                                 (TG3_FLAG_RX_PAUSE |
1194                                                 TG3_FLAG_TX_PAUSE);
1195                                 else if (remote_adv & LPA_PAUSE_ASYM)
1196                                         new_tg3_flags |=
1197                                                 (TG3_FLAG_RX_PAUSE);
1198                         } else {
1199                                 if (remote_adv & LPA_PAUSE_CAP)
1200                                         new_tg3_flags |=
1201                                                 (TG3_FLAG_RX_PAUSE |
1202                                                 TG3_FLAG_TX_PAUSE);
1203                         }
1204                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1205                         if ((remote_adv & LPA_PAUSE_CAP) &&
1206                         (remote_adv & LPA_PAUSE_ASYM))
1207                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1208                 }
1209
1210                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1211                 tp->tg3_flags |= new_tg3_flags;
1212         } else {
1213                 new_tg3_flags = tp->tg3_flags;
1214         }
1215
1216         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1217                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1218         else
1219                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1220
1221         if (old_rx_mode != tp->rx_mode) {
1222                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1223         }
1224         
1225         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1226                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1227         else
1228                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1229
1230         if (old_tx_mode != tp->tx_mode) {
1231                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1232         }
1233 }
1234
1235 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1236 {
1237         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1238         case MII_TG3_AUX_STAT_10HALF:
1239                 *speed = SPEED_10;
1240                 *duplex = DUPLEX_HALF;
1241                 break;
1242
1243         case MII_TG3_AUX_STAT_10FULL:
1244                 *speed = SPEED_10;
1245                 *duplex = DUPLEX_FULL;
1246                 break;
1247
1248         case MII_TG3_AUX_STAT_100HALF:
1249                 *speed = SPEED_100;
1250                 *duplex = DUPLEX_HALF;
1251                 break;
1252
1253         case MII_TG3_AUX_STAT_100FULL:
1254                 *speed = SPEED_100;
1255                 *duplex = DUPLEX_FULL;
1256                 break;
1257
1258         case MII_TG3_AUX_STAT_1000HALF:
1259                 *speed = SPEED_1000;
1260                 *duplex = DUPLEX_HALF;
1261                 break;
1262
1263         case MII_TG3_AUX_STAT_1000FULL:
1264                 *speed = SPEED_1000;
1265                 *duplex = DUPLEX_FULL;
1266                 break;
1267
1268         default:
1269                 *speed = SPEED_INVALID;
1270                 *duplex = DUPLEX_INVALID;
1271                 break;
1272         };
1273 }
1274
1275 static void tg3_phy_copper_begin(struct tg3 *tp)
1276 {
1277         u32 new_adv;
1278         int i;
1279
1280         if (tp->link_config.phy_is_low_power) {
1281                 /* Entering low power mode.  Disable gigabit and
1282                  * 100baseT advertisements.
1283                  */
1284                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1285
1286                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1287                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1288                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1289                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1290
1291                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1292         } else if (tp->link_config.speed == SPEED_INVALID) {
1293                 tp->link_config.advertising =
1294                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1295                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1296                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1297                          ADVERTISED_Autoneg | ADVERTISED_MII);
1298
1299                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1300                         tp->link_config.advertising &=
1301                                 ~(ADVERTISED_1000baseT_Half |
1302                                   ADVERTISED_1000baseT_Full);
1303
1304                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1305                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1306                         new_adv |= ADVERTISE_10HALF;
1307                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1308                         new_adv |= ADVERTISE_10FULL;
1309                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1310                         new_adv |= ADVERTISE_100HALF;
1311                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1312                         new_adv |= ADVERTISE_100FULL;
1313                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1314
1315                 if (tp->link_config.advertising &
1316                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1317                         new_adv = 0;
1318                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1319                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1320                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1321                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1322                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1323                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1324                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1325                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1326                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1327                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1328                 } else {
1329                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1330                 }
1331         } else {
1332                 /* Asking for a specific link mode. */
1333                 if (tp->link_config.speed == SPEED_1000) {
1334                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1335                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1336
1337                         if (tp->link_config.duplex == DUPLEX_FULL)
1338                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1339                         else
1340                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1341                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1342                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1343                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1344                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1345                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1346                 } else {
1347                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1348
1349                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1350                         if (tp->link_config.speed == SPEED_100) {
1351                                 if (tp->link_config.duplex == DUPLEX_FULL)
1352                                         new_adv |= ADVERTISE_100FULL;
1353                                 else
1354                                         new_adv |= ADVERTISE_100HALF;
1355                         } else {
1356                                 if (tp->link_config.duplex == DUPLEX_FULL)
1357                                         new_adv |= ADVERTISE_10FULL;
1358                                 else
1359                                         new_adv |= ADVERTISE_10HALF;
1360                         }
1361                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1362                 }
1363         }
1364
1365         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1366             tp->link_config.speed != SPEED_INVALID) {
1367                 u32 bmcr, orig_bmcr;
1368
1369                 tp->link_config.active_speed = tp->link_config.speed;
1370                 tp->link_config.active_duplex = tp->link_config.duplex;
1371
1372                 bmcr = 0;
1373                 switch (tp->link_config.speed) {
1374                 default:
1375                 case SPEED_10:
1376                         break;
1377
1378                 case SPEED_100:
1379                         bmcr |= BMCR_SPEED100;
1380                         break;
1381
1382                 case SPEED_1000:
1383                         bmcr |= TG3_BMCR_SPEED1000;
1384                         break;
1385                 }
1386
1387                 if (tp->link_config.duplex == DUPLEX_FULL)
1388                         bmcr |= BMCR_FULLDPLX;
1389
1390                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1391                     (bmcr != orig_bmcr)) {
1392                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1393                         for (i = 0; i < 1500; i++) {
1394                                 u32 tmp;
1395
1396                                 udelay(10);
1397                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1398                                     tg3_readphy(tp, MII_BMSR, &tmp))
1399                                         continue;
1400                                 if (!(tmp & BMSR_LSTATUS)) {
1401                                         udelay(40);
1402                                         break;
1403                                 }
1404                         }
1405                         tg3_writephy(tp, MII_BMCR, bmcr);
1406                         udelay(40);
1407                 }
1408         } else {
1409                 tg3_writephy(tp, MII_BMCR,
1410                              BMCR_ANENABLE | BMCR_ANRESTART);
1411         }
1412 }
1413
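/* Load the BCM5401 DSP fixups (turn off tap power management, set the
 * extended packet length bit) via the AUX_CTRL and DSP address/data
 * registers.  Returns nonzero if any of the MDIO writes failed.
 */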
1414 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1415 {
1416         int err;
1417
1418         /* Turn off tap power management and set the
1419          * extended packet length bit. */
1420         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1421
1422         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1423         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1424
1425         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1426         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1427
1428         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1429         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1430
1431         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1432         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1433
1434         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1435         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1436
1437         udelay(40);
1438
1439         return err;
1440 }
1441
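/* Return 1 if the PHY is currently advertising all 10/100 modes (and,
 * unless the device is 10/100-only, both 1000BASE-T modes as well);
 * return 0 if anything is missing or the registers cannot be read.
 */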
1442 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1443 {
1444         u32 adv_reg, all_mask;
1445
1446         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1447                 return 0;
1448
1449         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1450                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1451         if ((adv_reg & all_mask) != all_mask)
1452                 return 0;
1453         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1454                 u32 tg3_ctrl;
1455
1456                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1457                         return 0;
1458
1459                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1460                             MII_TG3_CTRL_ADV_1000_FULL);
1461                 if ((tg3_ctrl & all_mask) != all_mask)
1462                         return 0;
1463         }
1464         return 1;
1465 }
1466
1467 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1468 {
1469         int current_link_up;
1470         u32 bmsr, dummy;
1471         u16 current_speed;
1472         u8 current_duplex;
1473         int i, err;
1474
1475         tw32(MAC_EVENT, 0);
1476
1477         tw32_f(MAC_STATUS,
1478              (MAC_STATUS_SYNC_CHANGED |
1479               MAC_STATUS_CFG_CHANGED |
1480               MAC_STATUS_MI_COMPLETION |
1481               MAC_STATUS_LNKSTATE_CHANGED));
1482         udelay(40);
1483
1484         tp->mi_mode = MAC_MI_MODE_BASE;
1485         tw32_f(MAC_MI_MODE, tp->mi_mode);
1486         udelay(80);
1487
1488         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1489
1490         /* Some third-party PHYs need to be reset on link going
1491          * down.
1492          */
1493         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1494              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1495              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1496             netif_carrier_ok(tp->dev)) {
1497                 tg3_readphy(tp, MII_BMSR, &bmsr);
1498                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1499                     !(bmsr & BMSR_LSTATUS))
1500                         force_reset = 1;
1501         }
1502         if (force_reset)
1503                 tg3_phy_reset(tp);
1504
1505         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1506                 tg3_readphy(tp, MII_BMSR, &bmsr);
1507                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1508                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1509                         bmsr = 0;
1510
1511                 if (!(bmsr & BMSR_LSTATUS)) {
1512                         err = tg3_init_5401phy_dsp(tp);
1513                         if (err)
1514                                 return err;
1515
1516                         tg3_readphy(tp, MII_BMSR, &bmsr);
1517                         for (i = 0; i < 1000; i++) {
1518                                 udelay(10);
1519                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1520                                     (bmsr & BMSR_LSTATUS)) {
1521                                         udelay(40);
1522                                         break;
1523                                 }
1524                         }
1525
1526                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1527                             !(bmsr & BMSR_LSTATUS) &&
1528                             tp->link_config.active_speed == SPEED_1000) {
1529                                 err = tg3_phy_reset(tp);
1530                                 if (!err)
1531                                         err = tg3_init_5401phy_dsp(tp);
1532                                 if (err)
1533                                         return err;
1534                         }
1535                 }
1536         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1537                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1538                 /* 5701 {A0,B0} CRC bug workaround */
1539                 tg3_writephy(tp, 0x15, 0x0a75);
1540                 tg3_writephy(tp, 0x1c, 0x8c68);
1541                 tg3_writephy(tp, 0x1c, 0x8d68);
1542                 tg3_writephy(tp, 0x1c, 0x8c68);
1543         }
1544
1545         /* Clear pending interrupts... */
1546         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1547         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1548
1549         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1550                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1551         else
1552                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1553
1554         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1555             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1556                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1557                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1558                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1559                 else
1560                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1561         }
1562
1563         current_link_up = 0;
1564         current_speed = SPEED_INVALID;
1565         current_duplex = DUPLEX_INVALID;
1566
1567         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1568                 u32 val;
1569
1570                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1571                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1572                 if (!(val & (1 << 10))) {
1573                         val |= (1 << 10);
1574                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1575                         goto relink;
1576                 }
1577         }
1578
1579         bmsr = 0;
1580         for (i = 0; i < 100; i++) {
1581                 tg3_readphy(tp, MII_BMSR, &bmsr);
1582                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1583                     (bmsr & BMSR_LSTATUS))
1584                         break;
1585                 udelay(40);
1586         }
1587
1588         if (bmsr & BMSR_LSTATUS) {
1589                 u32 aux_stat, bmcr;
1590
1591                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1592                 for (i = 0; i < 2000; i++) {
1593                         udelay(10);
1594                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1595                             aux_stat)
1596                                 break;
1597                 }
1598
1599                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1600                                              &current_speed,
1601                                              &current_duplex);
1602
1603                 bmcr = 0;
1604                 for (i = 0; i < 200; i++) {
1605                         tg3_readphy(tp, MII_BMCR, &bmcr);
1606                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1607                                 continue;
1608                         if (bmcr && bmcr != 0x7fff)
1609                                 break;
1610                         udelay(10);
1611                 }
1612
1613                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1614                         if (bmcr & BMCR_ANENABLE) {
1615                                 current_link_up = 1;
1616
1617                                 /* Force autoneg restart if we are exiting
1618                                  * low power mode.
1619                                  */
1620                                 if (!tg3_copper_is_advertising_all(tp))
1621                                         current_link_up = 0;
1622                         } else {
1623                                 current_link_up = 0;
1624                         }
1625                 } else {
1626                         if (!(bmcr & BMCR_ANENABLE) &&
1627                             tp->link_config.speed == current_speed &&
1628                             tp->link_config.duplex == current_duplex) {
1629                                 current_link_up = 1;
1630                         } else {
1631                                 current_link_up = 0;
1632                         }
1633                 }
1634
1635                 tp->link_config.active_speed = current_speed;
1636                 tp->link_config.active_duplex = current_duplex;
1637         }
1638
1639         if (current_link_up == 1 &&
1640             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1641             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1642                 u32 local_adv, remote_adv;
1643
1644                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1645                         local_adv = 0;
1646                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1647
1648                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1649                         remote_adv = 0;
1650
1651                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1652
1653                 /* If we are not advertising full pause capability,
1654                  * something is wrong.  Bring the link down and reconfigure.
1655                  */
1656                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1657                         current_link_up = 0;
1658                 } else {
1659                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1660                 }
1661         }
1662 relink:
1663         if (current_link_up == 0) {
1664                 u32 tmp;
1665
1666                 tg3_phy_copper_begin(tp);
1667
1668                 tg3_readphy(tp, MII_BMSR, &tmp);
1669                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1670                     (tmp & BMSR_LSTATUS))
1671                         current_link_up = 1;
1672         }
1673
1674         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1675         if (current_link_up == 1) {
1676                 if (tp->link_config.active_speed == SPEED_100 ||
1677                     tp->link_config.active_speed == SPEED_10)
1678                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1679                 else
1680                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1681         } else
1682                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1683
1684         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1685         if (tp->link_config.active_duplex == DUPLEX_HALF)
1686                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1687
1688         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1689         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1690                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1691                     (current_link_up == 1 &&
1692                      tp->link_config.active_speed == SPEED_10))
1693                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1694         } else {
1695                 if (current_link_up == 1)
1696                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1697         }
1698
1699         /* ??? Without this setting Netgear GA302T PHY does not
1700          * ??? send/receive packets...
1701          */
1702         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1703             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1704                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1705                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1706                 udelay(80);
1707         }
1708
1709         tw32_f(MAC_MODE, tp->mac_mode);
1710         udelay(40);
1711
1712         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1713                 /* Polled via timer. */
1714                 tw32_f(MAC_EVENT, 0);
1715         } else {
1716                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1717         }
1718         udelay(40);
1719
1720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1721             current_link_up == 1 &&
1722             tp->link_config.active_speed == SPEED_1000 &&
1723             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1724              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1725                 udelay(120);
1726                 tw32_f(MAC_STATUS,
1727                      (MAC_STATUS_SYNC_CHANGED |
1728                       MAC_STATUS_CFG_CHANGED));
1729                 udelay(40);
1730                 tg3_write_mem(tp,
1731                               NIC_SRAM_FIRMWARE_MBOX,
1732                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1733         }
1734
1735         if (current_link_up != netif_carrier_ok(tp->dev)) {
1736                 if (current_link_up)
1737                         netif_carrier_on(tp->dev);
1738                 else
1739                         netif_carrier_off(tp->dev);
1740                 tg3_link_report(tp);
1741         }
1742
1743         return 0;
1744 }
1745
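/* Software state for the 1000BASE-X autonegotiation state machine used
 * on fiber/SerDes devices when hardware autoneg is not in use.
 */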
1746 struct tg3_fiber_aneginfo {
1747         int state;
1748 #define ANEG_STATE_UNKNOWN              0
1749 #define ANEG_STATE_AN_ENABLE            1
1750 #define ANEG_STATE_RESTART_INIT         2
1751 #define ANEG_STATE_RESTART              3
1752 #define ANEG_STATE_DISABLE_LINK_OK      4
1753 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1754 #define ANEG_STATE_ABILITY_DETECT       6
1755 #define ANEG_STATE_ACK_DETECT_INIT      7
1756 #define ANEG_STATE_ACK_DETECT           8
1757 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1758 #define ANEG_STATE_COMPLETE_ACK         10
1759 #define ANEG_STATE_IDLE_DETECT_INIT     11
1760 #define ANEG_STATE_IDLE_DETECT          12
1761 #define ANEG_STATE_LINK_OK              13
1762 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1763 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1764
1765         u32 flags;
1766 #define MR_AN_ENABLE            0x00000001
1767 #define MR_RESTART_AN           0x00000002
1768 #define MR_AN_COMPLETE          0x00000004
1769 #define MR_PAGE_RX              0x00000008
1770 #define MR_NP_LOADED            0x00000010
1771 #define MR_TOGGLE_TX            0x00000020
1772 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1773 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1774 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1775 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1776 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1777 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1778 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1779 #define MR_TOGGLE_RX            0x00002000
1780 #define MR_NP_RX                0x00004000
1781
1782 #define MR_LINK_OK              0x80000000
1783
1784         unsigned long link_time, cur_time;
1785
1786         u32 ability_match_cfg;
1787         int ability_match_count;
1788
1789         char ability_match, idle_match, ack_match;
1790
1791         u32 txconfig, rxconfig;
1792 #define ANEG_CFG_NP             0x00000080
1793 #define ANEG_CFG_ACK            0x00000040
1794 #define ANEG_CFG_RF2            0x00000020
1795 #define ANEG_CFG_RF1            0x00000010
1796 #define ANEG_CFG_PS2            0x00000001
1797 #define ANEG_CFG_PS1            0x00008000
1798 #define ANEG_CFG_HD             0x00004000
1799 #define ANEG_CFG_FD             0x00002000
1800 #define ANEG_CFG_INVAL          0x00001f06
1801
1802 };
1803 #define ANEG_OK         0
1804 #define ANEG_DONE       1
1805 #define ANEG_TIMER_ENAB 2
1806 #define ANEG_FAILED     -1
1807
1808 #define ANEG_STATE_SETTLE_TIME  10000
1809
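/* One tick of the software autoneg state machine.  The caller runs this
 * repeatedly (see fiber_autoneg()); each call samples the received config
 * word (MAC_RX_AUTO_NEG) if one is present, advances ap->state, and
 * returns ANEG_OK or ANEG_TIMER_ENAB while negotiation is in progress,
 * or ANEG_DONE / ANEG_FAILED once it has finished.
 */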
1810 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1811                                    struct tg3_fiber_aneginfo *ap)
1812 {
1813         unsigned long delta;
1814         u32 rx_cfg_reg;
1815         int ret;
1816
1817         if (ap->state == ANEG_STATE_UNKNOWN) {
1818                 ap->rxconfig = 0;
1819                 ap->link_time = 0;
1820                 ap->cur_time = 0;
1821                 ap->ability_match_cfg = 0;
1822                 ap->ability_match_count = 0;
1823                 ap->ability_match = 0;
1824                 ap->idle_match = 0;
1825                 ap->ack_match = 0;
1826         }
1827         ap->cur_time++;
1828
1829         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1830                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1831
1832                 if (rx_cfg_reg != ap->ability_match_cfg) {
1833                         ap->ability_match_cfg = rx_cfg_reg;
1834                         ap->ability_match = 0;
1835                         ap->ability_match_count = 0;
1836                 } else {
1837                         if (++ap->ability_match_count > 1) {
1838                                 ap->ability_match = 1;
1839                                 ap->ability_match_cfg = rx_cfg_reg;
1840                         }
1841                 }
1842                 if (rx_cfg_reg & ANEG_CFG_ACK)
1843                         ap->ack_match = 1;
1844                 else
1845                         ap->ack_match = 0;
1846
1847                 ap->idle_match = 0;
1848         } else {
1849                 ap->idle_match = 1;
1850                 ap->ability_match_cfg = 0;
1851                 ap->ability_match_count = 0;
1852                 ap->ability_match = 0;
1853                 ap->ack_match = 0;
1854
1855                 rx_cfg_reg = 0;
1856         }
1857
1858         ap->rxconfig = rx_cfg_reg;
1859         ret = ANEG_OK;
1860
1861         switch (ap->state) {
1862         case ANEG_STATE_UNKNOWN:
1863                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1864                         ap->state = ANEG_STATE_AN_ENABLE;
1865
1866                 /* fallthru */
1867         case ANEG_STATE_AN_ENABLE:
1868                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1869                 if (ap->flags & MR_AN_ENABLE) {
1870                         ap->link_time = 0;
1871                         ap->cur_time = 0;
1872                         ap->ability_match_cfg = 0;
1873                         ap->ability_match_count = 0;
1874                         ap->ability_match = 0;
1875                         ap->idle_match = 0;
1876                         ap->ack_match = 0;
1877
1878                         ap->state = ANEG_STATE_RESTART_INIT;
1879                 } else {
1880                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1881                 }
1882                 break;
1883
1884         case ANEG_STATE_RESTART_INIT:
1885                 ap->link_time = ap->cur_time;
1886                 ap->flags &= ~(MR_NP_LOADED);
1887                 ap->txconfig = 0;
1888                 tw32(MAC_TX_AUTO_NEG, 0);
1889                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1890                 tw32_f(MAC_MODE, tp->mac_mode);
1891                 udelay(40);
1892
1893                 ret = ANEG_TIMER_ENAB;
1894                 ap->state = ANEG_STATE_RESTART;
1895
1896                 /* fallthru */
1897         case ANEG_STATE_RESTART:
1898                 delta = ap->cur_time - ap->link_time;
1899                 if (delta > ANEG_STATE_SETTLE_TIME) {
1900                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1901                 } else {
1902                         ret = ANEG_TIMER_ENAB;
1903                 }
1904                 break;
1905
1906         case ANEG_STATE_DISABLE_LINK_OK:
1907                 ret = ANEG_DONE;
1908                 break;
1909
1910         case ANEG_STATE_ABILITY_DETECT_INIT:
1911                 ap->flags &= ~(MR_TOGGLE_TX);
1912                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1913                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1914                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1915                 tw32_f(MAC_MODE, tp->mac_mode);
1916                 udelay(40);
1917
1918                 ap->state = ANEG_STATE_ABILITY_DETECT;
1919                 break;
1920
1921         case ANEG_STATE_ABILITY_DETECT:
1922                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1923                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1924                 }
1925                 break;
1926
1927         case ANEG_STATE_ACK_DETECT_INIT:
1928                 ap->txconfig |= ANEG_CFG_ACK;
1929                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1930                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1931                 tw32_f(MAC_MODE, tp->mac_mode);
1932                 udelay(40);
1933
1934                 ap->state = ANEG_STATE_ACK_DETECT;
1935
1936                 /* fallthru */
1937         case ANEG_STATE_ACK_DETECT:
1938                 if (ap->ack_match != 0) {
1939                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1940                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1941                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1942                         } else {
1943                                 ap->state = ANEG_STATE_AN_ENABLE;
1944                         }
1945                 } else if (ap->ability_match != 0 &&
1946                            ap->rxconfig == 0) {
1947                         ap->state = ANEG_STATE_AN_ENABLE;
1948                 }
1949                 break;
1950
1951         case ANEG_STATE_COMPLETE_ACK_INIT:
1952                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1953                         ret = ANEG_FAILED;
1954                         break;
1955                 }
1956                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1957                                MR_LP_ADV_HALF_DUPLEX |
1958                                MR_LP_ADV_SYM_PAUSE |
1959                                MR_LP_ADV_ASYM_PAUSE |
1960                                MR_LP_ADV_REMOTE_FAULT1 |
1961                                MR_LP_ADV_REMOTE_FAULT2 |
1962                                MR_LP_ADV_NEXT_PAGE |
1963                                MR_TOGGLE_RX |
1964                                MR_NP_RX);
1965                 if (ap->rxconfig & ANEG_CFG_FD)
1966                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1967                 if (ap->rxconfig & ANEG_CFG_HD)
1968                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1969                 if (ap->rxconfig & ANEG_CFG_PS1)
1970                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1971                 if (ap->rxconfig & ANEG_CFG_PS2)
1972                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1973                 if (ap->rxconfig & ANEG_CFG_RF1)
1974                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1975                 if (ap->rxconfig & ANEG_CFG_RF2)
1976                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1977                 if (ap->rxconfig & ANEG_CFG_NP)
1978                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1979
1980                 ap->link_time = ap->cur_time;
1981
1982                 ap->flags ^= (MR_TOGGLE_TX);
1983                 if (ap->rxconfig & 0x0008)
1984                         ap->flags |= MR_TOGGLE_RX;
1985                 if (ap->rxconfig & ANEG_CFG_NP)
1986                         ap->flags |= MR_NP_RX;
1987                 ap->flags |= MR_PAGE_RX;
1988
1989                 ap->state = ANEG_STATE_COMPLETE_ACK;
1990                 ret = ANEG_TIMER_ENAB;
1991                 break;
1992
1993         case ANEG_STATE_COMPLETE_ACK:
1994                 if (ap->ability_match != 0 &&
1995                     ap->rxconfig == 0) {
1996                         ap->state = ANEG_STATE_AN_ENABLE;
1997                         break;
1998                 }
1999                 delta = ap->cur_time - ap->link_time;
2000                 if (delta > ANEG_STATE_SETTLE_TIME) {
2001                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2002                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2003                         } else {
2004                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2005                                     !(ap->flags & MR_NP_RX)) {
2006                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2007                                 } else {
2008                                         ret = ANEG_FAILED;
2009                                 }
2010                         }
2011                 }
2012                 break;
2013
2014         case ANEG_STATE_IDLE_DETECT_INIT:
2015                 ap->link_time = ap->cur_time;
2016                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2017                 tw32_f(MAC_MODE, tp->mac_mode);
2018                 udelay(40);
2019
2020                 ap->state = ANEG_STATE_IDLE_DETECT;
2021                 ret = ANEG_TIMER_ENAB;
2022                 break;
2023
2024         case ANEG_STATE_IDLE_DETECT:
2025                 if (ap->ability_match != 0 &&
2026                     ap->rxconfig == 0) {
2027                         ap->state = ANEG_STATE_AN_ENABLE;
2028                         break;
2029                 }
2030                 delta = ap->cur_time - ap->link_time;
2031                 if (delta > ANEG_STATE_SETTLE_TIME) {
2032                         /* XXX another gem from the Broadcom driver :( */
2033                         ap->state = ANEG_STATE_LINK_OK;
2034                 }
2035                 break;
2036
2037         case ANEG_STATE_LINK_OK:
2038                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2039                 ret = ANEG_DONE;
2040                 break;
2041
2042         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2043                 /* ??? unimplemented */
2044                 break;
2045
2046         case ANEG_STATE_NEXT_PAGE_WAIT:
2047                 /* ??? unimplemented */
2048                 break;
2049
2050         default:
2051                 ret = ANEG_FAILED;
2052                 break;
2053         }
2054
2055         return ret;
2056 }
2057
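/* Run the software autoneg state machine for up to ~195 ms (one tick per
 * microsecond).  *flags is filled in with the resulting MR_* flags.
 * Returns 1 if negotiation finished (ANEG_DONE) with any of
 * MR_AN_COMPLETE, MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX set, 0 otherwise.
 */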
2058 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2059 {
2060         int res = 0;
2061         struct tg3_fiber_aneginfo aninfo;
2062         int status = ANEG_FAILED;
2063         unsigned int tick;
2064         u32 tmp;
2065
2066         tw32_f(MAC_TX_AUTO_NEG, 0);
2067
2068         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2069         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2070         udelay(40);
2071
2072         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2073         udelay(40);
2074
2075         memset(&aninfo, 0, sizeof(aninfo));
2076         aninfo.flags |= MR_AN_ENABLE;
2077         aninfo.state = ANEG_STATE_UNKNOWN;
2078         aninfo.cur_time = 0;
2079         tick = 0;
2080         while (++tick < 195000) {
2081                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2082                 if (status == ANEG_DONE || status == ANEG_FAILED)
2083                         break;
2084
2085                 udelay(1);
2086         }
2087
2088         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2089         tw32_f(MAC_MODE, tp->mac_mode);
2090         udelay(40);
2091
2092         *flags = aninfo.flags;
2093
2094         if (status == ANEG_DONE &&
2095             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2096                              MR_LP_ADV_FULL_DUPLEX)))
2097                 res = 1;
2098
2099         return res;
2100 }
2101
2102 static void tg3_init_bcm8002(struct tg3 *tp)
2103 {
2104         u32 mac_status = tr32(MAC_STATUS);
2105         int i;
2106
2107         /* Reset when initializing the first time or when we have a link. */
2108         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2109             !(mac_status & MAC_STATUS_PCS_SYNCED))
2110                 return;
2111
2112         /* Set PLL lock range. */
2113         tg3_writephy(tp, 0x16, 0x8007);
2114
2115         /* SW reset */
2116         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2117
2118         /* Wait for reset to complete. */
2119         /* XXX schedule_timeout() ... */
2120         for (i = 0; i < 500; i++)
2121                 udelay(10);
2122
2123         /* Config mode; select PMA/Ch 1 regs. */
2124         tg3_writephy(tp, 0x10, 0x8411);
2125
2126         /* Enable auto-lock and comdet, select txclk for tx. */
2127         tg3_writephy(tp, 0x11, 0x0a10);
2128
2129         tg3_writephy(tp, 0x18, 0x00a0);
2130         tg3_writephy(tp, 0x16, 0x41ff);
2131
2132         /* Assert and deassert POR. */
2133         tg3_writephy(tp, 0x13, 0x0400);
2134         udelay(40);
2135         tg3_writephy(tp, 0x13, 0x0000);
2136
2137         tg3_writephy(tp, 0x11, 0x0a50);
2138         udelay(40);
2139         tg3_writephy(tp, 0x11, 0x0a10);
2140
2141         /* Wait for signal to stabilize */
2142         /* XXX schedule_timeout() ... */
2143         for (i = 0; i < 15000; i++)
2144                 udelay(10);
2145
2146         /* Deselect the channel register so we can read the PHYID
2147          * later.
2148          */
2149         tg3_writephy(tp, 0x10, 0x8011);
2150 }
2151
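/* Fiber link setup using the on-chip SG_DIG hardware autoneg block:
 * program SG_DIG_CTRL for forced or autonegotiated operation (with a
 * MAC_SERDES_CFG workaround on chips other than 5704 A0/A1), give the
 * link ~200ms to negotiate, derive flow control from the received
 * ability bits, and fall back to parallel detection when no config
 * words are seen.  Returns 1 if the link came up, 0 otherwise.
 */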
2152 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2153 {
2154         u32 sg_dig_ctrl, sg_dig_status;
2155         u32 serdes_cfg, expected_sg_dig_ctrl;
2156         int workaround, port_a;
2157         int current_link_up;
2158
2159         serdes_cfg = 0;
2160         expected_sg_dig_ctrl = 0;
2161         workaround = 0;
2162         port_a = 1;
2163         current_link_up = 0;
2164
2165         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2166             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2167                 workaround = 1;
2168                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2169                         port_a = 0;
2170
2171                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2172                 /* preserve bits 20-23 for voltage regulator */
2173                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2174         }
2175
2176         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2177
2178         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2179                 if (sg_dig_ctrl & (1 << 31)) {
2180                         if (workaround) {
2181                                 u32 val = serdes_cfg;
2182
2183                                 if (port_a)
2184                                         val |= 0xc010000;
2185                                 else
2186                                         val |= 0x4010000;
2187                                 tw32_f(MAC_SERDES_CFG, val);
2188                         }
2189                         tw32_f(SG_DIG_CTRL, 0x01388400);
2190                 }
2191                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2192                         tg3_setup_flow_control(tp, 0, 0);
2193                         current_link_up = 1;
2194                 }
2195                 goto out;
2196         }
2197
2198         /* Want auto-negotiation.  */
2199         expected_sg_dig_ctrl = 0x81388400;
2200
2201         /* Pause capability */
2202         expected_sg_dig_ctrl |= (1 << 11);
2203
2204         /* Asymmetric pause */
2205         expected_sg_dig_ctrl |= (1 << 12);
2206
2207         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2208                 if (workaround)
2209                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2210                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2211                 udelay(5);
2212                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2213
2214                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2215         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2216                                  MAC_STATUS_SIGNAL_DET)) {
2217                 int i;
2218
2219                 /* Give time to negotiate (~200ms) */
2220                 for (i = 0; i < 40000; i++) {
2221                         sg_dig_status = tr32(SG_DIG_STATUS);
2222                         if (sg_dig_status & (0x3))
2223                                 break;
2224                         udelay(5);
2225                 }
2226                 mac_status = tr32(MAC_STATUS);
2227
2228                 if ((sg_dig_status & (1 << 1)) &&
2229                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2230                         u32 local_adv, remote_adv;
2231
2232                         local_adv = ADVERTISE_PAUSE_CAP;
2233                         remote_adv = 0;
2234                         if (sg_dig_status & (1 << 19))
2235                                 remote_adv |= LPA_PAUSE_CAP;
2236                         if (sg_dig_status & (1 << 20))
2237                                 remote_adv |= LPA_PAUSE_ASYM;
2238
2239                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2240                         current_link_up = 1;
2241                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2242                 } else if (!(sg_dig_status & (1 << 1))) {
2243                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2244                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2245                         else {
2246                                 if (workaround) {
2247                                         u32 val = serdes_cfg;
2248
2249                                         if (port_a)
2250                                                 val |= 0xc010000;
2251                                         else
2252                                                 val |= 0x4010000;
2253
2254                                         tw32_f(MAC_SERDES_CFG, val);
2255                                 }
2256
2257                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2258                                 udelay(40);
2259
2260                                 /* Link parallel detection: the link is up
2261                                  * only if we have PCS_SYNC and are not
2262                                  * receiving config code words. */
2263                                 mac_status = tr32(MAC_STATUS);
2264                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2265                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2266                                         tg3_setup_flow_control(tp, 0, 0);
2267                                         current_link_up = 1;
2268                                 }
2269                         }
2270                 }
2271         }
2272
2273 out:
2274         return current_link_up;
2275 }
2276
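/* Fiber link setup without hardware autoneg: either run the software
 * autoneg state machine (fiber_autoneg()) and derive flow control from
 * the partner's advertised pause bits, or simply force a 1000-FD link.
 * Returns 1 if the link is considered up, 0 otherwise.
 */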
2277 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2278 {
2279         int current_link_up = 0;
2280
2281         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2282                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2283                 goto out;
2284         }
2285
2286         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2287                 u32 flags;
2288                 int i;
2289   
2290                 if (fiber_autoneg(tp, &flags)) {
2291                         u32 local_adv, remote_adv;
2292
2293                         local_adv = ADVERTISE_PAUSE_CAP;
2294                         remote_adv = 0;
2295                         if (flags & MR_LP_ADV_SYM_PAUSE)
2296                                 remote_adv |= LPA_PAUSE_CAP;
2297                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2298                                 remote_adv |= LPA_PAUSE_ASYM;
2299
2300                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2301
2302                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2303                         current_link_up = 1;
2304                 }
2305                 for (i = 0; i < 30; i++) {
2306                         udelay(20);
2307                         tw32_f(MAC_STATUS,
2308                                (MAC_STATUS_SYNC_CHANGED |
2309                                 MAC_STATUS_CFG_CHANGED));
2310                         udelay(40);
2311                         if ((tr32(MAC_STATUS) &
2312                              (MAC_STATUS_SYNC_CHANGED |
2313                               MAC_STATUS_CFG_CHANGED)) == 0)
2314                                 break;
2315                 }
2316
2317                 mac_status = tr32(MAC_STATUS);
2318                 if (current_link_up == 0 &&
2319                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2320                     !(mac_status & MAC_STATUS_RCVD_CFG))
2321                         current_link_up = 1;
2322         } else {
2323                 /* Forcing 1000FD link up. */
2324                 current_link_up = 1;
2325                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2326
2327                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2328                 udelay(40);
2329         }
2330
2331 out:
2332         return current_link_up;
2333 }
2334
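/* Top-level link setup for fiber/SerDes devices: put the MAC into TBI
 * mode, initialize an external BCM8002 PHY if one is present, let either
 * the hardware SG_DIG block or the software state machine negotiate the
 * link, then update the LEDs, carrier state and link report.
 */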
2335 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2336 {
2337         u32 orig_pause_cfg;
2338         u16 orig_active_speed;
2339         u8 orig_active_duplex;
2340         u32 mac_status;
2341         int current_link_up;
2342         int i;
2343
2344         orig_pause_cfg =
2345                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2346                                   TG3_FLAG_TX_PAUSE));
2347         orig_active_speed = tp->link_config.active_speed;
2348         orig_active_duplex = tp->link_config.active_duplex;
2349
2350         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2351             netif_carrier_ok(tp->dev) &&
2352             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2353                 mac_status = tr32(MAC_STATUS);
2354                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2355                                MAC_STATUS_SIGNAL_DET |
2356                                MAC_STATUS_CFG_CHANGED |
2357                                MAC_STATUS_RCVD_CFG);
2358                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2359                                    MAC_STATUS_SIGNAL_DET)) {
2360                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2361                                             MAC_STATUS_CFG_CHANGED));
2362                         return 0;
2363                 }
2364         }
2365
2366         tw32_f(MAC_TX_AUTO_NEG, 0);
2367
2368         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2369         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2370         tw32_f(MAC_MODE, tp->mac_mode);
2371         udelay(40);
2372
2373         if (tp->phy_id == PHY_ID_BCM8002)
2374                 tg3_init_bcm8002(tp);
2375
2376         /* Enable link change event even when serdes polling.  */
2377         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2378         udelay(40);
2379
2380         current_link_up = 0;
2381         mac_status = tr32(MAC_STATUS);
2382
2383         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2384                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2385         else
2386                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2387
2388         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2389         tw32_f(MAC_MODE, tp->mac_mode);
2390         udelay(40);
2391
2392         tp->hw_status->status =
2393                 (SD_STATUS_UPDATED |
2394                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2395
2396         for (i = 0; i < 100; i++) {
2397                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2398                                     MAC_STATUS_CFG_CHANGED));
2399                 udelay(5);
2400                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2401                                          MAC_STATUS_CFG_CHANGED)) == 0)
2402                         break;
2403         }
2404
2405         mac_status = tr32(MAC_STATUS);
2406         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2407                 current_link_up = 0;
2408                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2409                         tw32_f(MAC_MODE, (tp->mac_mode |
2410                                           MAC_MODE_SEND_CONFIGS));
2411                         udelay(1);
2412                         tw32_f(MAC_MODE, tp->mac_mode);
2413                 }
2414         }
2415
2416         if (current_link_up == 1) {
2417                 tp->link_config.active_speed = SPEED_1000;
2418                 tp->link_config.active_duplex = DUPLEX_FULL;
2419                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2420                                     LED_CTRL_LNKLED_OVERRIDE |
2421                                     LED_CTRL_1000MBPS_ON));
2422         } else {
2423                 tp->link_config.active_speed = SPEED_INVALID;
2424                 tp->link_config.active_duplex = DUPLEX_INVALID;
2425                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2426                                     LED_CTRL_LNKLED_OVERRIDE |
2427                                     LED_CTRL_TRAFFIC_OVERRIDE));
2428         }
2429
2430         if (current_link_up != netif_carrier_ok(tp->dev)) {
2431                 if (current_link_up)
2432                         netif_carrier_on(tp->dev);
2433                 else
2434                         netif_carrier_off(tp->dev);
2435                 tg3_link_report(tp);
2436         } else {
2437                 u32 now_pause_cfg =
2438                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2439                                          TG3_FLAG_TX_PAUSE);
2440                 if (orig_pause_cfg != now_pause_cfg ||
2441                     orig_active_speed != tp->link_config.active_speed ||
2442                     orig_active_duplex != tp->link_config.active_duplex)
2443                         tg3_link_report(tp);
2444         }
2445
2446         return 0;
2447 }
2448
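/* Common entry point for (re)configuring the link: dispatch to the fiber
 * or copper setup routine, adjust MAC_TX_LENGTHS for the half-duplex
 * gigabit case, and, on pre-5705 chips, enable or disable statistics
 * coalescing depending on whether the carrier is up.
 */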
2449 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2450 {
2451         int err;
2452
2453         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2454                 err = tg3_setup_fiber_phy(tp, force_reset);
2455         } else {
2456                 err = tg3_setup_copper_phy(tp, force_reset);
2457         }
2458
2459         if (tp->link_config.active_speed == SPEED_1000 &&
2460             tp->link_config.active_duplex == DUPLEX_HALF)
2461                 tw32(MAC_TX_LENGTHS,
2462                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2463                       (6 << TX_LENGTHS_IPG_SHIFT) |
2464                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2465         else
2466                 tw32(MAC_TX_LENGTHS,
2467                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2468                       (6 << TX_LENGTHS_IPG_SHIFT) |
2469                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2470
2471         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2472                 if (netif_carrier_ok(tp->dev)) {
2473                         tw32(HOSTCC_STAT_COAL_TICKS,
2474                              DEFAULT_STAT_COAL_TICKS);
2475                 } else {
2476                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2477                 }
2478         }
2479
2480         return err;
2481 }
2482
2483 /* Tigon3 never reports partial packet sends.  So we do not
2484  * need special logic to handle SKBs that have not had all
2485  * of their frags sent yet, like SunGEM does.
2486  */
2487 static void tg3_tx(struct tg3 *tp)
2488 {
2489         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2490         u32 sw_idx = tp->tx_cons;
2491
2492         while (sw_idx != hw_idx) {
2493                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2494                 struct sk_buff *skb = ri->skb;
2495                 int i;
2496
2497                 if (unlikely(skb == NULL))
2498                         BUG();
2499
2500                 pci_unmap_single(tp->pdev,
2501                                  pci_unmap_addr(ri, mapping),
2502                                  skb_headlen(skb),
2503                                  PCI_DMA_TODEVICE);
2504
2505                 ri->skb = NULL;
2506
2507                 sw_idx = NEXT_TX(sw_idx);
2508
2509                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2510                         if (unlikely(sw_idx == hw_idx))
2511                                 BUG();
2512
2513                         ri = &tp->tx_buffers[sw_idx];
2514                         if (unlikely(ri->skb != NULL))
2515                                 BUG();
2516
2517                         pci_unmap_page(tp->pdev,
2518                                        pci_unmap_addr(ri, mapping),
2519                                        skb_shinfo(skb)->frags[i].size,
2520                                        PCI_DMA_TODEVICE);
2521
2522                         sw_idx = NEXT_TX(sw_idx);
2523                 }
2524
2525                 dev_kfree_skb_irq(skb);
2526         }
2527
2528         tp->tx_cons = sw_idx;
2529
2530         if (netif_queue_stopped(tp->dev) &&
2531             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2532                 netif_wake_queue(tp->dev);
2533 }
2534
2535 /* Returns size of skb allocated or < 0 on error.
2536  *
2537  * We only need to fill in the address because the other members
2538  * of the RX descriptor are invariant, see tg3_init_rings.
2539  *
2540  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2541  * posting buffers we only dirty the first cache line of the RX
2542  * descriptor (containing the address).  Whereas for the RX status
2543  * buffers the cpu only reads the last cacheline of the RX descriptor
2544  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2545  */
2546 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2547                             int src_idx, u32 dest_idx_unmasked)
2548 {
2549         struct tg3_rx_buffer_desc *desc;
2550         struct ring_info *map, *src_map;
2551         struct sk_buff *skb;
2552         dma_addr_t mapping;
2553         int skb_size, dest_idx;
2554
2555         src_map = NULL;
2556         switch (opaque_key) {
2557         case RXD_OPAQUE_RING_STD:
2558                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2559                 desc = &tp->rx_std[dest_idx];
2560                 map = &tp->rx_std_buffers[dest_idx];
2561                 if (src_idx >= 0)
2562                         src_map = &tp->rx_std_buffers[src_idx];
2563                 skb_size = RX_PKT_BUF_SZ;
2564                 break;
2565
2566         case RXD_OPAQUE_RING_JUMBO:
2567                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2568                 desc = &tp->rx_jumbo[dest_idx];
2569                 map = &tp->rx_jumbo_buffers[dest_idx];
2570                 if (src_idx >= 0)
2571                         src_map = &tp->rx_jumbo_buffers[src_idx];
2572                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2573                 break;
2574
2575         default:
2576                 return -EINVAL;
2577         }
2578
2579         /* Do not overwrite any of the map or rp information
2580          * until we are sure we can commit to a new buffer.
2581          *
2582          * Callers depend upon this behavior and assume that
2583          * we leave everything unchanged if we fail.
2584          */
2585         skb = dev_alloc_skb(skb_size);
2586         if (skb == NULL)
2587                 return -ENOMEM;
2588
2589         skb->dev = tp->dev;
2590         skb_reserve(skb, tp->rx_offset);
2591
2592         mapping = pci_map_single(tp->pdev, skb->data,
2593                                  skb_size - tp->rx_offset,
2594                                  PCI_DMA_FROMDEVICE);
2595
2596         map->skb = skb;
2597         pci_unmap_addr_set(map, mapping, mapping);
2598
2599         if (src_map != NULL)
2600                 src_map->skb = NULL;
2601
2602         desc->addr_hi = ((u64)mapping >> 32);
2603         desc->addr_lo = ((u64)mapping & 0xffffffff);
2604
2605         return skb_size;
2606 }
2607
2608 /* We only need to move over the address because the other
2609  * members of the RX descriptor are invariant.  See notes above
2610  * tg3_alloc_rx_skb for full details.
2611  */
2612 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2613                            int src_idx, u32 dest_idx_unmasked)
2614 {
2615         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2616         struct ring_info *src_map, *dest_map;
2617         int dest_idx;
2618
2619         switch (opaque_key) {
2620         case RXD_OPAQUE_RING_STD:
2621                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2622                 dest_desc = &tp->rx_std[dest_idx];
2623                 dest_map = &tp->rx_std_buffers[dest_idx];
2624                 src_desc = &tp->rx_std[src_idx];
2625                 src_map = &tp->rx_std_buffers[src_idx];
2626                 break;
2627
2628         case RXD_OPAQUE_RING_JUMBO:
2629                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2630                 dest_desc = &tp->rx_jumbo[dest_idx];
2631                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2632                 src_desc = &tp->rx_jumbo[src_idx];
2633                 src_map = &tp->rx_jumbo_buffers[src_idx];
2634                 break;
2635
2636         default:
2637                 return;
2638         }
2639
2640         dest_map->skb = src_map->skb;
2641         pci_unmap_addr_set(dest_map, mapping,
2642                            pci_unmap_addr(src_map, mapping));
2643         dest_desc->addr_hi = src_desc->addr_hi;
2644         dest_desc->addr_lo = src_desc->addr_lo;
2645
2646         src_map->skb = NULL;
2647 }
2648
2649 #if TG3_VLAN_TAG_USED
2650 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2651 {
2652         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2653 }
2654 #endif
2655
2656 /* The RX ring scheme is composed of multiple rings which post fresh
2657  * buffers to the chip, and one special ring the chip uses to report
2658  * status back to the host.
2659  *
2660  * The special ring reports the status of received packets to the
2661  * host.  The chip does not write into the original descriptor the
2662  * RX buffer was obtained from.  The chip simply takes the original
2663  * descriptor as provided by the host, updates the status and length
2664  * field, then writes this into the next status ring entry.
2665  *
2666  * Each ring the host uses to post buffers to the chip is described
2667  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2668  * it is first placed into the on-chip RAM.  When the packet's length
2669  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
2670  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2671  * whose MAXLEN covers the new packet's length is chosen.
2672  *
2673  * The "separate ring for rx status" scheme may sound queer, but it makes
2674  * sense from a cache coherency perspective.  If only the host writes
2675  * to the buffer post rings, and only the chip writes to the rx status
2676  * rings, then cache lines never move beyond shared-modified state.
2677  * If both the host and chip were to write into the same ring, cache line
2678  * eviction could occur since both entities want it in an exclusive state.
2679  */
2680 static int tg3_rx(struct tg3 *tp, int budget)
2681 {
2682         u32 work_mask;
2683         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2684         u16 hw_idx, sw_idx;
2685         int received;
2686
2687         hw_idx = tp->hw_status->idx[0].rx_producer;
2688         /*
2689          * We need to order the read of hw_idx and the read of
2690          * the opaque cookie.
2691          */
2692         rmb();
2693         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2694         work_mask = 0;
2695         received = 0;
2696         while (sw_idx != hw_idx && budget > 0) {
2697                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2698                 unsigned int len;
2699                 struct sk_buff *skb;
2700                 dma_addr_t dma_addr;
2701                 u32 opaque_key, desc_idx, *post_ptr;
2702
2703                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2704                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2705                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2706                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2707                                                   mapping);
2708                         skb = tp->rx_std_buffers[desc_idx].skb;
2709                         post_ptr = &tp->rx_std_ptr;
2710                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2711                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2712                                                   mapping);
2713                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2714                         post_ptr = &tp->rx_jumbo_ptr;
2715                 }
2716                 else {
2717                         goto next_pkt_nopost;
2718                 }
2719
2720                 work_mask |= opaque_key;
2721
2722                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2723                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2724                 drop_it:
2725                         tg3_recycle_rx(tp, opaque_key,
2726                                        desc_idx, *post_ptr);
2727                 drop_it_no_recycle:
2728                         /* Other statistics are tracked by the card. */
2729                         tp->net_stats.rx_dropped++;
2730                         goto next_pkt;
2731                 }
2732
2733                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2734
2735                 if (len > RX_COPY_THRESHOLD &&
2736                     tp->rx_offset == 2) {
2737                         /* rx_offset != 2 iff this is a 5701 card running
2738                          * in PCI-X mode [see tg3_get_invariants()]
2739                          */
2740                         int skb_size;
2741
2742                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2743                                                     desc_idx, *post_ptr);
2744                         if (skb_size < 0)
2745                                 goto drop_it;
2746
2747                         pci_unmap_single(tp->pdev, dma_addr,
2748                                          skb_size - tp->rx_offset,
2749                                          PCI_DMA_FROMDEVICE);
2750
2751                         skb_put(skb, len);
2752                 } else {
2753                         struct sk_buff *copy_skb;
2754
2755                         tg3_recycle_rx(tp, opaque_key,
2756                                        desc_idx, *post_ptr);
2757
2758                         copy_skb = dev_alloc_skb(len + 2);
2759                         if (copy_skb == NULL)
2760                                 goto drop_it_no_recycle;
2761
2762                         copy_skb->dev = tp->dev;
2763                         skb_reserve(copy_skb, 2);
2764                         skb_put(copy_skb, len);
2765                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2766                         memcpy(copy_skb->data, skb->data, len);
2767                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2768
2769                         /* We'll reuse the original ring buffer. */
2770                         skb = copy_skb;
2771                 }
2772
2773                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2774                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2775                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2776                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2777                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2778                 else
2779                         skb->ip_summed = CHECKSUM_NONE;
2780
2781                 skb->protocol = eth_type_trans(skb, tp->dev);
2782 #if TG3_VLAN_TAG_USED
2783                 if (tp->vlgrp != NULL &&
2784                     desc->type_flags & RXD_FLAG_VLAN) {
2785                         tg3_vlan_rx(tp, skb,
2786                                     desc->err_vlan & RXD_VLAN_MASK);
2787                 } else
2788 #endif
2789                         netif_receive_skb(skb);
2790
2791                 tp->dev->last_rx = jiffies;
2792                 received++;
2793                 budget--;
2794
2795 next_pkt:
2796                 (*post_ptr)++;
2797 next_pkt_nopost:
2798                 rx_rcb_ptr++;
2799                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2800         }
2801
2802         /* ACK the status ring. */
2803         tp->rx_rcb_ptr = rx_rcb_ptr;
2804         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2805                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2806
2807         /* Refill RX ring(s). */
2808         if (work_mask & RXD_OPAQUE_RING_STD) {
2809                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2810                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2811                              sw_idx);
2812         }
2813         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2814                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2815                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2816                              sw_idx);
2817         }
2818         mmiowb();
2819
2820         return received;
2821 }
2822
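     /* NAPI poll callback.  Handles link-change events, reaps completed
      * TX descriptors under tp->tx_lock, and then processes RX packets
      * within the NAPI budget.  RX itself runs without tp->lock; the
      * networking core guarantees that only one poll runs at a time.
      */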
2823 static int tg3_poll(struct net_device *netdev, int *budget)
2824 {
2825         struct tg3 *tp = netdev_priv(netdev);
2826         struct tg3_hw_status *sblk = tp->hw_status;
2827         unsigned long flags;
2828         int done;
2829
2830         spin_lock_irqsave(&tp->lock, flags);
2831
2832         /* handle link change and other phy events */
2833         if (!(tp->tg3_flags &
2834               (TG3_FLAG_USE_LINKCHG_REG |
2835                TG3_FLAG_POLL_SERDES))) {
2836                 if (sblk->status & SD_STATUS_LINK_CHG) {
2837                         sblk->status = SD_STATUS_UPDATED |
2838                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2839                         tg3_setup_phy(tp, 0);
2840                 }
2841         }
2842
2843         /* run TX completion thread */
2844         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2845                 spin_lock(&tp->tx_lock);
2846                 tg3_tx(tp);
2847                 spin_unlock(&tp->tx_lock);
2848         }
2849
2850         spin_unlock_irqrestore(&tp->lock, flags);
2851
2852         /* run RX thread, within the bounds set by NAPI.
2853          * All RX "locking" is done by ensuring outside
2854          * code synchronizes with dev->poll()
2855          */
2856         done = 1;
2857         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2858                 int orig_budget = *budget;
2859                 int work_done;
2860
2861                 if (orig_budget > netdev->quota)
2862                         orig_budget = netdev->quota;
2863
2864                 work_done = tg3_rx(tp, orig_budget);
2865
2866                 *budget -= work_done;
2867                 netdev->quota -= work_done;
2868
2869                 if (work_done >= orig_budget)
2870                         done = 0;
2871         }
2872
2873         /* if no more work, tell net stack and NIC we're done */
2874         if (done) {
2875                 spin_lock_irqsave(&tp->lock, flags);
2876                 __netif_rx_complete(netdev);
2877                 tg3_restart_ints(tp);
2878                 spin_unlock_irqrestore(&tp->lock, flags);
2879         }
2880
2881         return (done ? 0 : 1);
2882 }
2883
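     /* Lockless peek at the status block: returns non-zero if there is
      * anything for the poll handler to do -- a pending link change
      * (unless link state is polled via registers), TX completions, or
      * newly produced RX descriptors.
      */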
2884 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2885 {
2886         struct tg3_hw_status *sblk = tp->hw_status;
2887         unsigned int work_exists = 0;
2888
2889         /* check for phy events */
2890         if (!(tp->tg3_flags &
2891               (TG3_FLAG_USE_LINKCHG_REG |
2892                TG3_FLAG_POLL_SERDES))) {
2893                 if (sblk->status & SD_STATUS_LINK_CHG)
2894                         work_exists = 1;
2895         }
2896         /* check for RX/TX work to do */
2897         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2898             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2899                 work_exists = 1;
2900
2901         return work_exists;
2902 }
2903
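     /* Hard interrupt handler.  Writing 1 to interrupt mailbox 0 acks
      * INTA# and masks further chip interrupts; the real work is then
      * deferred to the NAPI poll routine via netif_rx_schedule().  If
      * the status block shows nothing to do, the mailbox is rewritten
      * with 0 to re-enable interrupts (shared IRQ case).
      */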
2904 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2905 {
2906         struct net_device *dev = dev_id;
2907         struct tg3 *tp = netdev_priv(dev);
2908         struct tg3_hw_status *sblk = tp->hw_status;
2909         unsigned long flags;
2910         unsigned int handled = 1;
2911
2912         spin_lock_irqsave(&tp->lock, flags);
2913
2914         /* In INTx mode, the interrupt can reach the CPU before the
2915          * status block (posted by the chip prior to the interrupt) is
2916          * visible in host memory.  Reading the PCI State register will
2917          * confirm whether the interrupt is ours and flush the status block.
2918          */
2919         if ((sblk->status & SD_STATUS_UPDATED) ||
2920             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2921                 /*
2922                  * writing any value to intr-mbox-0 clears PCI INTA# and
2923                  * chip-internal interrupt pending events.
2924                  * writing non-zero to intr-mbox-0 additionally tells the
2925                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2926                  * event coalescing.
2927                  */
2928                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2929                              0x00000001);
2930                 /*
2931                  * Flush PCI write.  This also guarantees that our
2932                  * status block has been flushed to host memory.
2933                  */
2934                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2935                 sblk->status &= ~SD_STATUS_UPDATED;
2936
2937                 if (likely(tg3_has_work(dev, tp)))
2938                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2939                 else {
2940                         /* no work, shared interrupt perhaps?  re-enable
2941                          * interrupts, and flush that PCI write
2942                          */
2943                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2944                                 0x00000000);
2945                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2946                 }
2947         } else {        /* shared interrupt */
2948                 handled = 0;
2949         }
2950
2951         spin_unlock_irqrestore(&tp->lock, flags);
2952
2953         return IRQ_RETVAL(handled);
2954 }
2955
2956 static int tg3_init_hw(struct tg3 *);
2957 static int tg3_halt(struct tg3 *);
2958
2959 #ifdef CONFIG_NET_POLL_CONTROLLER
2960 static void tg3_poll_controller(struct net_device *dev)
2961 {
2962         tg3_interrupt(dev->irq, dev, NULL);
2963 }
2964 #endif
2965
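     /* Work-queue handler scheduled from tg3_tx_timeout(): stops the
      * netif layer, halts and re-initializes the chip under the locks,
      * and restarts the timer if a restart was pending.
      */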
2966 static void tg3_reset_task(void *_data)
2967 {
2968         struct tg3 *tp = _data;
2969         unsigned int restart_timer;
2970
2971         tg3_netif_stop(tp);
2972
2973         spin_lock_irq(&tp->lock);
2974         spin_lock(&tp->tx_lock);
2975
2976         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2977         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2978
2979         tg3_halt(tp);
2980         tg3_init_hw(tp);
2981
2982         tg3_netif_start(tp);
2983
2984         spin_unlock(&tp->tx_lock);
2985         spin_unlock_irq(&tp->lock);
2986
2987         if (restart_timer)
2988                 mod_timer(&tp->timer, jiffies + 1);
2989 }
2990
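     /* netdev watchdog callback: the TX queue appears hung, so defer a
      * full chip reset to process context via tp->reset_task.
      */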
2991 static void tg3_tx_timeout(struct net_device *dev)
2992 {
2993         struct tg3 *tp = netdev_priv(dev);
2994
2995         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2996                dev->name);
2997
2998         schedule_work(&tp->reset_task);
2999 }
3000
3001 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3002
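     /* Workaround for a DMA hardware bug: a TX buffer whose mapping
      * straddles a 4GB boundary (see tg3_4g_overflow_test()) is copied
      * into a freshly allocated linear skb, remapped as a single
      * descriptor, and the original ring entries are unmapped.
      */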
3003 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3004                                        u32 guilty_entry, int guilty_len,
3005                                        u32 last_plus_one, u32 *start, u32 mss)
3006 {
3007         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3008         dma_addr_t new_addr;
3009         u32 entry = *start;
3010         int i;
3011
3012         if (!new_skb) {
3013                 dev_kfree_skb(skb);
3014                 return -1;
3015         }
3016
3017         /* New SKB is guaranteed to be linear. */
3018         entry = *start;
3019         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3020                                   PCI_DMA_TODEVICE);
3021         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3022                     (skb->ip_summed == CHECKSUM_HW) ?
3023                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3024         *start = NEXT_TX(entry);
3025
3026         /* Now clean up the sw ring entries. */
3027         i = 0;
3028         while (entry != last_plus_one) {
3029                 int len;
3030
3031                 if (i == 0)
3032                         len = skb_headlen(skb);
3033                 else
3034                         len = skb_shinfo(skb)->frags[i-1].size;
3035                 pci_unmap_single(tp->pdev,
3036                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3037                                  len, PCI_DMA_TODEVICE);
3038                 if (i == 0) {
3039                         tp->tx_buffers[entry].skb = new_skb;
3040                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3041                 } else {
3042                         tp->tx_buffers[entry].skb = NULL;
3043                 }
3044                 entry = NEXT_TX(entry);
3045                 i++;
3046         }
3047
3048         dev_kfree_skb(skb);
3049
3050         return 0;
3051 }
3052
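     /* Fill in one TX descriptor.  Bit 0 of mss_and_is_end marks the
      * last fragment of a packet (TXD_FLAG_END); the remaining bits
      * carry the TSO MSS, which shares the vlan_tag word.
      */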
3053 static void tg3_set_txd(struct tg3 *tp, int entry,
3054                         dma_addr_t mapping, int len, u32 flags,
3055                         u32 mss_and_is_end)
3056 {
3057         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3058         int is_end = (mss_and_is_end & 0x1);
3059         u32 mss = (mss_and_is_end >> 1);
3060         u32 vlan_tag = 0;
3061
3062         if (is_end)
3063                 flags |= TXD_FLAG_END;
3064         if (flags & TXD_FLAG_VLAN) {
3065                 vlan_tag = flags >> 16;
3066                 flags &= 0xffff;
3067         }
3068         vlan_tag |= (mss << TXD_MSS_SHIFT);
3069
3070         txd->addr_hi = ((u64) mapping >> 32);
3071         txd->addr_lo = ((u64) mapping & 0xffffffff);
3072         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3073         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3074 }
3075
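     /* True if a mapping of 'len' bytes at 'mapping' would cross a 4GB
      * boundary (with a few bytes of slack), which some chips cannot
      * DMA across correctly -- hence the hwbug workaround above.  For
      * example, a 100-byte buffer mapped at 0xfffffff0 ends beyond
      * 0x100000000 and takes the workaround path in tg3_start_xmit().
      */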
3076 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3077 {
3078         u32 base = (u32) mapping & 0xffffffff;
3079
3080         return ((base > 0xffffdcc0) &&
3081                 (base + len + 8 < base));
3082 }
3083
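     /* hard_start_xmit handler.  Maps the skb head and any page
      * fragments into TX descriptors, applies checksum/TSO/VLAN flags,
      * applies the 4GB-boundary workaround if needed, and finally kicks
      * the hardware by writing the new producer index to the send-host
      * mailbox.
      */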
3084 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3085 {
3086         struct tg3 *tp = netdev_priv(dev);
3087         dma_addr_t mapping;
3088         unsigned int i;
3089         u32 len, entry, base_flags, mss;
3090         int would_hit_hwbug;
3091         unsigned long flags;
3092
3093         len = skb_headlen(skb);
3094
3095         /* No BH disabling for tx_lock here.  We are running in BH disabled
3096          * context and TX reclaim runs via tp->poll inside of a software
3097          * interrupt.  Rejoice!
3098          *
3099          * Actually, things are not so simple.  If we are to take a hw
3100          * IRQ here, we can deadlock, consider:
3101          *
3102          *       CPU1           CPU2
3103          *   tg3_start_xmit
3104          *   take tp->tx_lock
3105          *                      tg3_timer
3106          *                      take tp->lock
3107          *   tg3_interrupt
3108          *   spin on tp->lock
3109          *                      spin on tp->tx_lock
3110          *
3111          * So we really do need to disable interrupts when taking
3112          * tx_lock here.
3113          */
3114         local_irq_save(flags);
3115         if (!spin_trylock(&tp->tx_lock)) {
3116                 local_irq_restore(flags);
3117                 return NETDEV_TX_LOCKED;
3118         }
3119
3120         /* This is a hard error, log it. */
3121         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3122                 netif_stop_queue(dev);
3123                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3124                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3125                        dev->name);
3126                 return NETDEV_TX_BUSY;
3127         }
3128
3129         entry = tp->tx_prod;
3130         base_flags = 0;
3131         if (skb->ip_summed == CHECKSUM_HW)
3132                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3133 #if TG3_TSO_SUPPORT != 0
3134         mss = 0;
3135         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3136             (mss = skb_shinfo(skb)->tso_size) != 0) {
3137                 int tcp_opt_len, ip_tcp_len;
3138
3139                 if (skb_header_cloned(skb) &&
3140                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3141                         dev_kfree_skb(skb);
3142                         goto out_unlock;
3143                 }
3144
3145                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3146                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3147
3148                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3149                                TXD_FLAG_CPU_POST_DMA);
3150
3151                 skb->nh.iph->check = 0;
3152                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3153                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3154                         skb->h.th->check = 0;
3155                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3156                 } else {
3158                         skb->h.th->check =
3159                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3160                                                    skb->nh.iph->daddr,
3161                                                    0, IPPROTO_TCP, 0);
3162                 }
3163
3164                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3165                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3166                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3167                                 int tsflags;
3168
3169                                 tsflags = ((skb->nh.iph->ihl - 5) +
3170                                            (tcp_opt_len >> 2));
3171                                 mss |= (tsflags << 11);
3172                         }
3173                 } else {
3174                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3175                                 int tsflags;
3176
3177                                 tsflags = ((skb->nh.iph->ihl - 5) +
3178                                            (tcp_opt_len >> 2));
3179                                 base_flags |= tsflags << 12;
3180                         }
3181                 }
3182         }
3183 #else
3184         mss = 0;
3185 #endif
3186 #if TG3_VLAN_TAG_USED
3187         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3188                 base_flags |= (TXD_FLAG_VLAN |
3189                                (vlan_tx_tag_get(skb) << 16));
3190 #endif
3191
3192         /* Queue skb data, a.k.a. the main skb fragment. */
3193         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3194
3195         tp->tx_buffers[entry].skb = skb;
3196         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3197
3198         would_hit_hwbug = 0;
3199
3200         if (tg3_4g_overflow_test(mapping, len))
3201                 would_hit_hwbug = entry + 1;
3202
3203         tg3_set_txd(tp, entry, mapping, len, base_flags,
3204                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3205
3206         entry = NEXT_TX(entry);
3207
3208         /* Now loop through additional data fragments, and queue them. */
3209         if (skb_shinfo(skb)->nr_frags > 0) {
3210                 unsigned int i, last;
3211
3212                 last = skb_shinfo(skb)->nr_frags - 1;
3213                 for (i = 0; i <= last; i++) {
3214                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3215
3216                         len = frag->size;
3217                         mapping = pci_map_page(tp->pdev,
3218                                                frag->page,
3219                                                frag->page_offset,
3220                                                len, PCI_DMA_TODEVICE);
3221
3222                         tp->tx_buffers[entry].skb = NULL;
3223                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3224
3225                         if (tg3_4g_overflow_test(mapping, len)) {
3226                                 /* Only one should match. */
3227                                 if (would_hit_hwbug)
3228                                         BUG();
3229                                 would_hit_hwbug = entry + 1;
3230                         }
3231
3232                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3233                                 tg3_set_txd(tp, entry, mapping, len,
3234                                             base_flags, (i == last)|(mss << 1));
3235                         else
3236                                 tg3_set_txd(tp, entry, mapping, len,
3237                                             base_flags, (i == last));
3238
3239                         entry = NEXT_TX(entry);
3240                 }
3241         }
3242
3243         if (would_hit_hwbug) {
3244                 u32 last_plus_one = entry;
3245                 u32 start;
3246                 unsigned int len = 0;
3247
3248                 would_hit_hwbug -= 1;
3249                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3250                 entry &= (TG3_TX_RING_SIZE - 1);
3251                 start = entry;
3252                 i = 0;
3253                 while (entry != last_plus_one) {
3254                         if (i == 0)
3255                                 len = skb_headlen(skb);
3256                         else
3257                                 len = skb_shinfo(skb)->frags[i-1].size;
3258
3259                         if (entry == would_hit_hwbug)
3260                                 break;
3261
3262                         i++;
3263                         entry = NEXT_TX(entry);
3264
3265                 }
3266
3267                 /* If the workaround fails due to memory/mapping
3268                  * failure, silently drop this packet.
3269                  */
3270                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3271                                                 entry, len,
3272                                                 last_plus_one,
3273                                                 &start, mss))
3274                         goto out_unlock;
3275
3276                 entry = start;
3277         }
3278
3279         /* Packets are ready, update Tx producer idx local and on card. */
3280         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3281
3282         tp->tx_prod = entry;
3283         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3284                 netif_stop_queue(dev);
3285
3286 out_unlock:
3287         mmiowb();
3288         spin_unlock_irqrestore(&tp->tx_lock, flags);
3289
3290         dev->trans_start = jiffies;
3291
3292         return NETDEV_TX_OK;
3293 }
3294
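     /* Record the new MTU and flag whether jumbo frames are required.
      * tg3_change_mtu() calls this either while the device is down or
      * after quiescing and halting the hardware.
      */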
3295 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3296                                int new_mtu)
3297 {
3298         dev->mtu = new_mtu;
3299
3300         if (new_mtu > ETH_DATA_LEN)
3301                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3302         else
3303                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3304 }
3305
3306 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3307 {
3308         struct tg3 *tp = netdev_priv(dev);
3309
3310         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3311                 return -EINVAL;
3312
3313         if (!netif_running(dev)) {
3314                 /* We'll just catch it later when the
3315                  * device is up'd.
3316                  */
3317                 tg3_set_mtu(dev, tp, new_mtu);
3318                 return 0;
3319         }
3320
3321         tg3_netif_stop(tp);
3322         spin_lock_irq(&tp->lock);
3323         spin_lock(&tp->tx_lock);
3324
3325         tg3_halt(tp);
3326
3327         tg3_set_mtu(dev, tp, new_mtu);
3328
3329         tg3_init_hw(tp);
3330
3331         tg3_netif_start(tp);
3332
3333         spin_unlock(&tp->tx_lock);
3334         spin_unlock_irq(&tp->lock);
3335
3336         return 0;
3337 }
3338
3339 /* Free up pending packets in all rx/tx rings.
3340  *
3341  * The chip has been shut down and the driver detached from
3342  * the networking stack, so no interrupts or new tx packets will
3343  * end up in the driver.  tp->{tx,}lock is not held and we are not
3344  * in an interrupt context and thus may sleep.
3345  */
3346 static void tg3_free_rings(struct tg3 *tp)
3347 {
3348         struct ring_info *rxp;
3349         int i;
3350
3351         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3352                 rxp = &tp->rx_std_buffers[i];
3353
3354                 if (rxp->skb == NULL)
3355                         continue;
3356                 pci_unmap_single(tp->pdev,
3357                                  pci_unmap_addr(rxp, mapping),
3358                                  RX_PKT_BUF_SZ - tp->rx_offset,
3359                                  PCI_DMA_FROMDEVICE);
3360                 dev_kfree_skb_any(rxp->skb);
3361                 rxp->skb = NULL;
3362         }
3363
3364         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3365                 rxp = &tp->rx_jumbo_buffers[i];
3366
3367                 if (rxp->skb == NULL)
3368                         continue;
3369                 pci_unmap_single(tp->pdev,
3370                                  pci_unmap_addr(rxp, mapping),
3371                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3372                                  PCI_DMA_FROMDEVICE);
3373                 dev_kfree_skb_any(rxp->skb);
3374                 rxp->skb = NULL;
3375         }
3376
3377         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3378                 struct tx_ring_info *txp;
3379                 struct sk_buff *skb;
3380                 int j;
3381
3382                 txp = &tp->tx_buffers[i];
3383                 skb = txp->skb;
3384
3385                 if (skb == NULL) {
3386                         i++;
3387                         continue;
3388                 }
3389
3390                 pci_unmap_single(tp->pdev,
3391                                  pci_unmap_addr(txp, mapping),
3392                                  skb_headlen(skb),
3393                                  PCI_DMA_TODEVICE);
3394                 txp->skb = NULL;
3395
3396                 i++;
3397
3398                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3399                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3400                         pci_unmap_page(tp->pdev,
3401                                        pci_unmap_addr(txp, mapping),
3402                                        skb_shinfo(skb)->frags[j].size,
3403                                        PCI_DMA_TODEVICE);
3404                         i++;
3405                 }
3406
3407                 dev_kfree_skb_any(skb);
3408         }
3409 }
3410
3411 /* Initialize tx/rx rings for packet processing.
3412  *
3413  * The chip has been shut down and the driver detached from
3414  * the networking stack, so no interrupts or new tx packets will
3415  * end up in the driver.  tp->{tx,}lock are held and thus
3416  * we may not sleep.
3417  */
3418 static void tg3_init_rings(struct tg3 *tp)
3419 {
3420         u32 i;
3421
3422         /* Free up all the SKBs. */
3423         tg3_free_rings(tp);
3424
3425         /* Zero out all descriptors. */
3426         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3427         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3428         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3429         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3430
3431         /* Initialize invariants of the rings; we only set this
3432          * stuff once.  This works because the card does not
3433          * write into the rx buffer posting rings.
3434          */
3435         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3436                 struct tg3_rx_buffer_desc *rxd;
3437
3438                 rxd = &tp->rx_std[i];
3439                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3440                         << RXD_LEN_SHIFT;
3441                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3442                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3443                                (i << RXD_OPAQUE_INDEX_SHIFT));
3444         }
3445
3446         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3447                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3448                         struct tg3_rx_buffer_desc *rxd;
3449
3450                         rxd = &tp->rx_jumbo[i];
3451                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3452                                 << RXD_LEN_SHIFT;
3453                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3454                                 RXD_FLAG_JUMBO;
3455                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3456                                (i << RXD_OPAQUE_INDEX_SHIFT));
3457                 }
3458         }
3459
3460         /* Now allocate fresh SKBs for each rx ring. */
3461         for (i = 0; i < tp->rx_pending; i++) {
3462                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3463                                      -1, i) < 0)
3464                         break;
3465         }
3466
3467         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3468                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3469                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3470                                              -1, i) < 0)
3471                                 break;
3472                 }
3473         }
3474 }
3475
3476 /*
3477  * Must not be invoked with interrupt sources disabled and
3478  * the hardware shut down.
3479  */
3480 static void tg3_free_consistent(struct tg3 *tp)
3481 {
3482         if (tp->rx_std_buffers) {
3483                 kfree(tp->rx_std_buffers);
3484                 tp->rx_std_buffers = NULL;
3485         }
3486         if (tp->rx_std) {
3487                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3488                                     tp->rx_std, tp->rx_std_mapping);
3489                 tp->rx_std = NULL;
3490         }
3491         if (tp->rx_jumbo) {
3492                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3493                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3494                 tp->rx_jumbo = NULL;
3495         }
3496         if (tp->rx_rcb) {
3497                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3498                                     tp->rx_rcb, tp->rx_rcb_mapping);
3499                 tp->rx_rcb = NULL;
3500         }
3501         if (tp->tx_ring) {
3502                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3503                         tp->tx_ring, tp->tx_desc_mapping);
3504                 tp->tx_ring = NULL;
3505         }
3506         if (tp->hw_status) {
3507                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3508                                     tp->hw_status, tp->status_mapping);
3509                 tp->hw_status = NULL;
3510         }
3511         if (tp->hw_stats) {
3512                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3513                                     tp->hw_stats, tp->stats_mapping);
3514                 tp->hw_stats = NULL;
3515         }
3516 }
3517
3518 /*
3519  * Must not be invoked with interrupt sources disabled and
3520  * the hardware shut down.  Can sleep.
3521  */
3522 static int tg3_alloc_consistent(struct tg3 *tp)
3523 {
3524         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3525                                       (TG3_RX_RING_SIZE +
3526                                        TG3_RX_JUMBO_RING_SIZE)) +
3527                                      (sizeof(struct tx_ring_info) *
3528                                       TG3_TX_RING_SIZE),
3529                                      GFP_KERNEL);
3530         if (!tp->rx_std_buffers)
3531                 return -ENOMEM;
3532
3533         memset(tp->rx_std_buffers, 0,
3534                (sizeof(struct ring_info) *
3535                 (TG3_RX_RING_SIZE +
3536                  TG3_RX_JUMBO_RING_SIZE)) +
3537                (sizeof(struct tx_ring_info) *
3538                 TG3_TX_RING_SIZE));
3539
3540         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3541         tp->tx_buffers = (struct tx_ring_info *)
3542                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3543
3544         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3545                                           &tp->rx_std_mapping);
3546         if (!tp->rx_std)
3547                 goto err_out;
3548
3549         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3550                                             &tp->rx_jumbo_mapping);
3551
3552         if (!tp->rx_jumbo)
3553                 goto err_out;
3554
3555         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3556                                           &tp->rx_rcb_mapping);
3557         if (!tp->rx_rcb)
3558                 goto err_out;
3559
3560         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3561                                            &tp->tx_desc_mapping);
3562         if (!tp->tx_ring)
3563                 goto err_out;
3564
3565         tp->hw_status = pci_alloc_consistent(tp->pdev,
3566                                              TG3_HW_STATUS_SIZE,
3567                                              &tp->status_mapping);
3568         if (!tp->hw_status)
3569                 goto err_out;
3570
3571         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3572                                             sizeof(struct tg3_hw_stats),
3573                                             &tp->stats_mapping);
3574         if (!tp->hw_stats)
3575                 goto err_out;
3576
3577         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3578         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3579
3580         return 0;
3581
3582 err_out:
3583         tg3_free_consistent(tp);
3584         return -ENOMEM;
3585 }
3586
3587 #define MAX_WAIT_CNT 1000
3588
3589 /* To stop a block, clear the enable bit and poll till it
3590  * clears.  tp->lock is held.
3591  */
3592 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3593 {
3594         unsigned int i;
3595         u32 val;
3596
3597         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3598                 switch (ofs) {
3599                 case RCVLSC_MODE:
3600                 case DMAC_MODE:
3601                 case MBFREE_MODE:
3602                 case BUFMGR_MODE:
3603                 case MEMARB_MODE:
3604                         /* We can't enable/disable these bits of the
3605                          * 5705/5750, just say success.
3606                          */
3607                         return 0;
3608
3609                 default:
3610                         break;
3611                 }
3612         }
3613
3614         val = tr32(ofs);
3615         val &= ~enable_bit;
3616         tw32_f(ofs, val);
3617
3618         for (i = 0; i < MAX_WAIT_CNT; i++) {
3619                 udelay(100);
3620                 val = tr32(ofs);
3621                 if ((val & enable_bit) == 0)
3622                         break;
3623         }
3624
3625         if (i == MAX_WAIT_CNT) {
3626                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3627                        "ofs=%lx enable_bit=%x\n",
3628                        ofs, enable_bit);
3629                 return -ENODEV;
3630         }
3631
3632         return 0;
3633 }
3634
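     /* Quiesce the chip: disable interrupts and the RX MAC, stop each
      * receive/send/DMA block in turn while waiting for its enable bit
      * to clear, reset the FTQ, and finally clear the host status and
      * statistics blocks.
      */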
3635 /* tp->lock is held. */
3636 static int tg3_abort_hw(struct tg3 *tp)
3637 {
3638         int i, err;
3639
3640         tg3_disable_ints(tp);
3641
3642         tp->rx_mode &= ~RX_MODE_ENABLE;
3643         tw32_f(MAC_RX_MODE, tp->rx_mode);
3644         udelay(10);
3645
3646         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3647         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3648         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3649         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3650         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3651         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3652
3653         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3654         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3655         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3656         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3657         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3658         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3659         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3660         if (err)
3661                 goto out;
3662
3663         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3664         tw32_f(MAC_MODE, tp->mac_mode);
3665         udelay(40);
3666
3667         tp->tx_mode &= ~TX_MODE_ENABLE;
3668         tw32_f(MAC_TX_MODE, tp->tx_mode);
3669
3670         for (i = 0; i < MAX_WAIT_CNT; i++) {
3671                 udelay(100);
3672                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3673                         break;
3674         }
3675         if (i >= MAX_WAIT_CNT) {
3676                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3677                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3678                        tp->dev->name, tr32(MAC_TX_MODE));
3679                 return -ENODEV;
3680         }
3681
3682         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3683         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3684         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3685
3686         tw32(FTQ_RESET, 0xffffffff);
3687         tw32(FTQ_RESET, 0x00000000);
3688
3689         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3690         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3691         if (err)
3692                 goto out;
3693
3694         if (tp->hw_status)
3695                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3696         if (tp->hw_stats)
3697                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3698
3699 out:
3700         return err;
3701 }
3702
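     /* Acquire the NVRAM software arbitration semaphore (request slot 1)
      * before touching the flash interface; presumably this keeps the
      * host from racing with other NVRAM users such as the on-chip
      * firmware.  Gives up after roughly 160ms.
      */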
3703 /* tp->lock is held. */
3704 static int tg3_nvram_lock(struct tg3 *tp)
3705 {
3706         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3707                 int i;
3708
3709                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3710                 for (i = 0; i < 8000; i++) {
3711                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3712                                 break;
3713                         udelay(20);
3714                 }
3715                 if (i == 8000)
3716                         return -ENODEV;
3717         }
3718         return 0;
3719 }
3720
3721 /* tp->lock is held. */
3722 static void tg3_nvram_unlock(struct tg3 *tp)
3723 {
3724         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3725                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3726 }
3727
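     /* The next three helpers implement the driver/firmware reset
      * handshake: around a chip reset the driver posts firmware-mailbox
      * magic and driver-state words into NIC SRAM so that management
      * (ASF) firmware, where present, knows whether the reset is an
      * init, shutdown or suspend.
      */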
3728 /* tp->lock is held. */
3729 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3730 {
3731         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3732                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3733                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3734
3735         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3736                 switch (kind) {
3737                 case RESET_KIND_INIT:
3738                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3739                                       DRV_STATE_START);
3740                         break;
3741
3742                 case RESET_KIND_SHUTDOWN:
3743                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3744                                       DRV_STATE_UNLOAD);
3745                         break;
3746
3747                 case RESET_KIND_SUSPEND:
3748                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3749                                       DRV_STATE_SUSPEND);
3750                         break;
3751
3752                 default:
3753                         break;
3754                 }
3755         }
3756 }
3757
3758 /* tp->lock is held. */
3759 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3760 {
3761         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3762                 switch (kind) {
3763                 case RESET_KIND_INIT:
3764                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3765                                       DRV_STATE_START_DONE);
3766                         break;
3767
3768                 case RESET_KIND_SHUTDOWN:
3769                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3770                                       DRV_STATE_UNLOAD_DONE);
3771                         break;
3772
3773                 default:
3774                         break;
3775                 }
3776         }
3777 }
3778
3779 /* tp->lock is held. */
3780 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3781 {
3782         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3783                 switch (kind) {
3784                 case RESET_KIND_INIT:
3785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3786                                       DRV_STATE_START);
3787                         break;
3788
3789                 case RESET_KIND_SHUTDOWN:
3790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3791                                       DRV_STATE_UNLOAD);
3792                         break;
3793
3794                 case RESET_KIND_SUSPEND:
3795                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3796                                       DRV_STATE_SUSPEND);
3797                         break;
3798
3799                 default:
3800                         break;
3801                 }
3802         }
3803 }
3804
3805 static void tg3_stop_fw(struct tg3 *);
3806
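     /* Perform a GRC core-clock reset of the whole chip.  MMIO reads
      * are avoided while the reset is in flight (they can hang some
      * hosts), PCI config space is restored afterwards, and the routine
      * then waits for the bootcode firmware to report completion via
      * the firmware mailbox before reprobing the ASF state.
      */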
3807 /* tp->lock is held. */
3808 static int tg3_chip_reset(struct tg3 *tp)
3809 {
3810         u32 val;
3811         u32 flags_save;
3812         int i;
3813
3814         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3815                 tg3_nvram_lock(tp);
3816
3817         /*
3818          * We must avoid the readl() that normally takes place.
3819          * It locks machines, causes machine checks, and other
3820          * fun things.  So, temporarily disable the 5701
3821          * hardware workaround, while we do the reset.
3822          */
3823         flags_save = tp->tg3_flags;
3824         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3825
3826         /* do the reset */
3827         val = GRC_MISC_CFG_CORECLK_RESET;
3828
3829         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3830                 if (tr32(0x7e2c) == 0x60) {
3831                         tw32(0x7e2c, 0x20);
3832                 }
3833                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3834                         tw32(GRC_MISC_CFG, (1 << 29));
3835                         val |= (1 << 29);
3836                 }
3837         }
3838
3839         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3840                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3841         tw32(GRC_MISC_CFG, val);
3842
3843         /* restore 5701 hardware bug workaround flag */
3844         tp->tg3_flags = flags_save;
3845
3846         /* Unfortunately, we have to delay before the PCI read back.
3847          * Some 575X chips will not even respond to a PCI cfg access
3848          * when the reset command is given to the chip.
3849          *
3850          * How do these hardware designers expect things to work
3851          * properly if the PCI write is posted for a long period
3852          * of time?  It is always necessary to have some method by
3853          * which a register read back can occur to push out the
3854          * write that performs the reset.
3855          *
3856          * For most tg3 variants the trick below was working.
3857          * Ho hum...
3858          */
3859         udelay(120);
3860
3861         /* Flush PCI posted writes.  The normal MMIO registers
3862          * are inaccessible at this time so this is the only
3863          * way to do this reliably (actually, this is no longer
3864          * the case, see above).  I tried to use indirect
3865          * register read/write but this upset some 5701 variants.
3866          */
3867         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3868
3869         udelay(120);
3870
3871         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3872                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3873                         int i;
3874                         u32 cfg_val;
3875
3876                         /* Wait for link training to complete.  */
3877                         for (i = 0; i < 5000; i++)
3878                                 udelay(100);
3879
3880                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3881                         pci_write_config_dword(tp->pdev, 0xc4,
3882                                                cfg_val | (1 << 15));
3883                 }
3884                 /* Set PCIE max payload size and clear error status.  */
3885                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3886         }
3887
3888         /* Re-enable indirect register accesses. */
3889         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3890                                tp->misc_host_ctrl);
3891
3892         /* Set MAX PCI retry to zero. */
3893         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3894         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3895             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3896                 val |= PCISTATE_RETRY_SAME_DMA;
3897         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3898
3899         pci_restore_state(tp->pdev);
3900
3901         /* Make sure PCI-X relaxed ordering bit is clear. */
3902         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3903         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3904         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3905
3906         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3907
3908         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3909                 tg3_stop_fw(tp);
3910                 tw32(0x5000, 0x400);
3911         }
3912
3913         tw32(GRC_MODE, tp->grc_mode);
3914
3915         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3916                 u32 val = tr32(0xc4);
3917
3918                 tw32(0xc4, val | (1 << 15));
3919         }
3920
3921         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3922             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3923                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3924                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3925                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3926                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3927         }
3928
3929         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3930                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3931                 tw32_f(MAC_MODE, tp->mac_mode);
3932         } else
3933                 tw32_f(MAC_MODE, 0);
3934         udelay(40);
3935
3936         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3937                 /* Wait for firmware initialization to complete. */
3938                 for (i = 0; i < 100000; i++) {
3939                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3940                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3941                                 break;
3942                         udelay(10);
3943                 }
3944                 if (i >= 100000) {
3945                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3946                                "firmware will not restart magic=%08x\n",
3947                                tp->dev->name, val);
3948                         return -ENODEV;
3949                 }
3950         }
3951
3952         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3953             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3954                 u32 val = tr32(0x7c00);
3955
3956                 tw32(0x7c00, val | (1 << 25));
3957         }
3958
3959         /* Reprobe ASF enable state.  */
3960         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3961         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3962         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3963         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3964                 u32 nic_cfg;
3965
3966                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3967                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3968                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3969                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
3970                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3971                 }
3972         }
3973
3974         return 0;
3975 }
3976
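     /* If ASF management firmware is running, post a PAUSE_FW command
      * in the firmware command mailbox, raise the RX CPU event bit and
      * briefly wait for the firmware to acknowledge it.
      */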
3977 /* tp->lock is held. */
3978 static void tg3_stop_fw(struct tg3 *tp)
3979 {
3980         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3981                 u32 val;
3982                 int i;
3983
3984                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3985                 val = tr32(GRC_RX_CPU_EVENT);
3986                 val |= (1 << 14);
3987                 tw32(GRC_RX_CPU_EVENT, val);
3988
3989                 /* Wait for RX cpu to ACK the event.  */
3990                 for (i = 0; i < 100; i++) {
3991                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3992                                 break;
3993                         udelay(1);
3994                 }
3995         }
3996 }
3997
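     /* Bring the chip to a fully stopped state: pause the firmware,
      * signal the impending shutdown, quiesce all blocks, reset the
      * core, and then signal that the shutdown has completed.
      */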
3998 /* tp->lock is held. */
3999 static int tg3_halt(struct tg3 *tp)
4000 {
4001         int err;
4002
4003         tg3_stop_fw(tp);
4004
4005         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4006
4007         tg3_abort_hw(tp);
4008         err = tg3_chip_reset(tp);
4009
4010         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4011         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4012
4013         if (err)
4014                 return err;
4015
4016         return 0;
4017 }
4018
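     /* Layout constants for the embedded bootcode image that follows:
      * load addresses and lengths of its text, rodata, data, sbss and
      * bss sections.  The image itself (tg3FwText/tg3FwRodata below)
      * appears to be MIPS machine code for one of the on-chip CPUs.
      */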
4019 #define TG3_FW_RELEASE_MAJOR    0x0
4020 #define TG3_FW_RELASE_MINOR     0x0
4021 #define TG3_FW_RELEASE_FIX      0x0
4022 #define TG3_FW_START_ADDR       0x08000000
4023 #define TG3_FW_TEXT_ADDR        0x08000000
4024 #define TG3_FW_TEXT_LEN         0x9c0
4025 #define TG3_FW_RODATA_ADDR      0x080009c0
4026 #define TG3_FW_RODATA_LEN       0x60
4027 #define TG3_FW_DATA_ADDR        0x08000a40
4028 #define TG3_FW_DATA_LEN         0x20
4029 #define TG3_FW_SBSS_ADDR        0x08000a60
4030 #define TG3_FW_SBSS_LEN         0xc
4031 #define TG3_FW_BSS_ADDR         0x08000a70
4032 #define TG3_FW_BSS_LEN          0x10
4033
4034 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4035         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4036         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4037         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4038         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4039         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4040         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4041         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4042         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4043         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4044         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4045         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4046         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4047         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4048         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4049         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4050         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4051         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4052         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4053         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4054         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4055         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4056         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4057         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4058         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4059         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4060         0, 0, 0, 0, 0, 0,
4061         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4062         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4063         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4064         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4065         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4066         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4067         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4068         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4069         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4070         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4071         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4072         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4073         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4074         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4075         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4076         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4077         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4078         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4079         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4080         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4081         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4082         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4083         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4084         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4085         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4086         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4087         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4088         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4089         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4090         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4091         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4092         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4093         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4094         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4095         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4096         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4097         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4098         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4099         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4100         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4101         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4102         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4103         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4104         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4105         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4106         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4107         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4108         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4109         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4110         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4111         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4112         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4113         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4114         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4115         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4116         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4117         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4118         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4119         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4120         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4121         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4122         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4123         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4124         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4125         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4126 };
4127
4128 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4129         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4130         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4131         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4132         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4133         0x00000000
4134 };
4135
4136 #if 0 /* All zeros, don't eat up space with it. */
4137 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4138         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4139         0x00000000, 0x00000000, 0x00000000, 0x00000000
4140 };
4141 #endif
4142
4143 #define RX_CPU_SCRATCH_BASE     0x30000
4144 #define RX_CPU_SCRATCH_SIZE     0x04000
4145 #define TX_CPU_SCRATCH_BASE     0x34000
4146 #define TX_CPU_SCRATCH_SIZE     0x04000
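     /* With the bases and sizes above, the RX CPU scratch window covers
      * NIC-internal addresses 0x30000-0x33fff and the TX CPU window covers
      * 0x34000-0x37fff (16 KB each).
      */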
4147
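     /* Halt the on-chip RX or TX CPU selected by @offset by repeatedly
      * writing CPU_STATE/CPU_MODE until the halt bit reads back, polling
      * up to 10000 times.  5705-and-later chips have no separate TX CPU,
      * hence the BUG() below if a TX CPU halt is requested on them.
      */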
4148 /* tp->lock is held. */
4149 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4150 {
4151         int i;
4152
4153         if (offset == TX_CPU_BASE &&
4154             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4155                 BUG();
4156
4157         if (offset == RX_CPU_BASE) {
4158                 for (i = 0; i < 10000; i++) {
4159                         tw32(offset + CPU_STATE, 0xffffffff);
4160                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4161                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4162                                 break;
4163                 }
4164
4165                 tw32(offset + CPU_STATE, 0xffffffff);
4166                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4167                 udelay(10);
4168         } else {
4169                 for (i = 0; i < 10000; i++) {
4170                         tw32(offset + CPU_STATE, 0xffffffff);
4171                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4172                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4173                                 break;
4174                 }
4175         }
4176
4177         if (i >= 10000) {
4178                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4179                        "%s CPU\n",
4180                        tp->dev->name,
4181                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4182                 return -ENODEV;
4183         }
4184         return 0;
4185 }
4186
4187 struct fw_info {
4188         unsigned int text_base;
4189         unsigned int text_len;
4190         u32 *text_data;
4191         unsigned int rodata_base;
4192         unsigned int rodata_len;
4193         u32 *rodata_data;
4194         unsigned int data_base;
4195         unsigned int data_len;
4196         u32 *data_data;
4197 };
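     /* Each firmware image is described by its .text/.rodata/.data
      * sections: a load address, a byte length, and a pointer to the words
      * to write.  A NULL *_data pointer means the section is all zeros and
      * is simply filled with zeros by the loader below (see the #if 0
      * tg3FwData array above).
      */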
4198
4199 /* tp->lock is held. */
4200 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4201                                  int cpu_scratch_size, struct fw_info *info)
4202 {
4203         int err, i;
4204         u32 orig_tg3_flags = tp->tg3_flags;
4205         void (*write_op)(struct tg3 *, u32, u32);
4206
4207         if (cpu_base == TX_CPU_BASE &&
4208             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4209                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4210                        "TX CPU firmware on %s which is 5705 or newer.\n",
4211                        tp->dev->name);
4212                 return -EINVAL;
4213         }
4214
4215         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4216                 write_op = tg3_write_mem;
4217         else
4218                 write_op = tg3_write_indirect_reg32;
4219
4220         /* Force use of PCI config space for indirect register
4221          * write calls.
4222          */
4223         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4224
4225         err = tg3_halt_cpu(tp, cpu_base);
4226         if (err)
4227                 goto out;
4228
4229         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4230                 write_op(tp, cpu_scratch_base + i, 0);
4231         tw32(cpu_base + CPU_STATE, 0xffffffff);
4232         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4233         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4234                 write_op(tp, (cpu_scratch_base +
4235                               (info->text_base & 0xffff) +
4236                               (i * sizeof(u32))),
4237                          (info->text_data ?
4238                           info->text_data[i] : 0));
4239         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4240                 write_op(tp, (cpu_scratch_base +
4241                               (info->rodata_base & 0xffff) +
4242                               (i * sizeof(u32))),
4243                          (info->rodata_data ?
4244                           info->rodata_data[i] : 0));
4245         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4246                 write_op(tp, (cpu_scratch_base +
4247                               (info->data_base & 0xffff) +
4248                               (i * sizeof(u32))),
4249                          (info->data_data ?
4250                           info->data_data[i] : 0));
4251
4252         err = 0;
4253
4254 out:
4255         tp->tg3_flags = orig_tg3_flags;
4256         return err;
4257 }
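     /* A possible refactoring (sketch only, not wired in): the three
      * identical section-copy loops above could share one helper along
      * these lines.  The helper name is illustrative.
      */
     #if 0
     static void tg3_copy_fw_section(struct tg3 *tp,
                                     void (*write_op)(struct tg3 *, u32, u32),
                                     u32 scratch_base, u32 section_base,
                                     unsigned int len, const u32 *data)
     {
             unsigned int i;

             /* Copy one firmware section word by word into CPU scratch,
              * zero-filling when no backing data array is provided.
              */
             for (i = 0; i < len / sizeof(u32); i++)
                     write_op(tp,
                              scratch_base + (section_base & 0xffff) +
                              i * sizeof(u32),
                              data ? data[i] : 0);
     }
     #endif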
4258
4259 /* tp->lock is held. */
4260 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4261 {
4262         struct fw_info info;
4263         int err, i;
4264
4265         info.text_base = TG3_FW_TEXT_ADDR;
4266         info.text_len = TG3_FW_TEXT_LEN;
4267         info.text_data = &tg3FwText[0];
4268         info.rodata_base = TG3_FW_RODATA_ADDR;
4269         info.rodata_len = TG3_FW_RODATA_LEN;
4270         info.rodata_data = &tg3FwRodata[0];
4271         info.data_base = TG3_FW_DATA_ADDR;
4272         info.data_len = TG3_FW_DATA_LEN;
4273         info.data_data = NULL;
4274
4275         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4276                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4277                                     &info);
4278         if (err)
4279                 return err;
4280
4281         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4282                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4283                                     &info);
4284         if (err)
4285                 return err;
4286
4287         /* Now start up only the RX CPU. */
4288         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4289         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4290
4291         for (i = 0; i < 5; i++) {
4292                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4293                         break;
4294                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4295                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4296                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4297                 udelay(1000);
4298         }
4299         if (i >= 5) {
4300                 printk(KERN_ERR PFX "tg3_load_firmware failed to set RX "
4301                        "CPU PC on %s: is %08x, should be %08x\n",
4302                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4303                        TG3_FW_TEXT_ADDR);
4304                 return -ENODEV;
4305         }
4306         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4307         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4308
4309         return 0;
4310 }
4311
4312 #if TG3_TSO_SUPPORT != 0
4313
4314 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4315 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4316 #define TG3_TSO_FW_RELEASE_FIX          0x0
4317 #define TG3_TSO_FW_START_ADDR           0x08000000
4318 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4319 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4320 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4321 #define TG3_TSO_FW_RODATA_LEN           0x60
4322 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4323 #define TG3_TSO_FW_DATA_LEN             0x30
4324 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4325 #define TG3_TSO_FW_SBSS_LEN             0x2c
4326 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4327 #define TG3_TSO_FW_BSS_LEN              0x894
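     /* The addresses above describe one contiguous firmware image: .text at
      * 0x08000000 (0x1aa0 bytes), .rodata at 0x08001aa0 (0x60), .data at
      * 0x08001b20 (0x30), .sbss at 0x08001b50 (0x2c) and .bss at 0x08001b80
      * (0x894), with small alignment gaps between some sections.
      */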
4328
4329 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4330         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4331         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4332         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4333         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4334         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4335         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4336         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4337         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4338         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4339         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4340         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4341         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4342         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4343         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4344         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4345         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4346         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4347         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4348         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4349         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4350         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4351         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4352         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4353         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4354         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4355         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4356         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4357         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4358         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4359         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4360         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4361         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4362         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4363         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4364         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4365         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4366         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4367         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4368         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4369         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4370         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4371         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4372         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4373         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4374         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4375         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4376         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4377         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4378         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4379         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4380         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4381         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4382         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4383         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4384         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4385         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4386         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4387         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4388         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4389         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4390         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4391         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4392         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4393         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4394         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4395         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4396         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4397         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4398         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4399         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4400         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4401         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4402         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4403         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4404         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4405         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4406         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4407         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4408         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4409         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4410         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4411         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4412         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4413         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4414         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4415         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4416         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4417         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4418         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4419         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4420         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4421         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4422         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4423         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4424         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4425         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4426         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4427         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4428         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4429         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4430         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4431         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4432         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4433         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4434         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4435         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4436         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4437         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4438         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4439         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4440         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4441         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4442         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4443         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4444         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4445         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4446         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4447         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4448         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4449         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4450         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4451         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4452         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4453         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4454         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4455         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4456         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4457         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4458         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4459         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4460         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4461         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4462         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4463         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4464         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4465         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4466         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4467         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4468         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4469         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4470         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4471         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4472         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4473         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4474         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4475         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4476         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4477         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4478         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4479         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4480         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4481         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4482         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4483         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4484         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4485         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4486         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4487         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4488         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4489         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4490         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4491         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4492         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4493         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4494         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4495         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4496         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4497         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4498         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4499         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4500         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4501         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4502         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4503         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4504         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4505         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4506         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4507         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4508         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4509         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4510         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4511         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4512         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4513         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4514         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4515         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4516         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4517         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4518         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4519         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4520         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4521         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4522         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4523         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4524         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4525         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4526         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4527         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4528         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4529         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4530         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4531         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4532         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4533         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4534         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4535         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4536         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4537         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4538         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4539         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4540         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4541         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4542         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4543         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4544         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4545         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4546         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4547         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4548         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4549         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4550         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4551         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4552         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4553         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4554         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4555         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4556         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4557         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4558         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4559         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4560         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4561         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4562         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4563         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4564         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4565         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4566         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4567         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4568         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4569         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4570         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4571         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4572         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4573         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4574         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4575         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4576         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4577         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4578         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4579         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4580         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4581         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4582         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4583         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4584         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4585         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4586         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4587         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4588         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4589         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4590         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4591         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4592         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4593         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4594         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4595         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4596         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4597         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4598         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4599         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4600         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4601         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4602         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4603         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4604         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4605         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4606         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4607         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4608         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4609         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4610         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4611         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4612         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4613         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4614 };
4615
4616 static u32 tg3TsoFwRodata[] = {
4617         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4618         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4619         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4620         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4621         0x00000000,
4622 };
4623
4624 static u32 tg3TsoFwData[] = {
4625         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4626         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4627         0x00000000,
4628 };
4629
4630 /* 5705 needs a special version of the TSO firmware.  */
4631 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4632 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4633 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4634 #define TG3_TSO5_FW_START_ADDR          0x00010000
4635 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4636 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4637 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4638 #define TG3_TSO5_FW_RODATA_LEN          0x50
4639 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4640 #define TG3_TSO5_FW_DATA_LEN            0x20
4641 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4642 #define TG3_TSO5_FW_SBSS_LEN            0x28
4643 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4644 #define TG3_TSO5_FW_BSS_LEN             0x88
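     /* Same layout pattern as the 5700-series TSO image above, based at
      * 0x00010000.  The five sections total 0xfb0 bytes, which is the
      * amount later carved out of the 5705 MBUF pool in tg3_reset_hw()
      * (rounded up to a 128-byte boundary).
      */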
4645
4646 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4647         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4648         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4649         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4650         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4651         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4652         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4653         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4654         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4655         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4656         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4657         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4658         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4659         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4660         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4661         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4662         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4663         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4664         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4665         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4666         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4667         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4668         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4669         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4670         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4671         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4672         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4673         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4674         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4675         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4676         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4677         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4678         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4679         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4680         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4681         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4682         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4683         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4684         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4685         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4686         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4687         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4688         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4689         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4690         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4691         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4692         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4693         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4694         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4695         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4696         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4697         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4698         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4699         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4700         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4701         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4702         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4703         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4704         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4705         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4706         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4707         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4708         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4709         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4710         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4711         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4712         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4713         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4714         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4715         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4716         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4717         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4718         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4719         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4720         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4721         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4722         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4723         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4724         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4725         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4726         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4727         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4728         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4729         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4730         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4731         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4732         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4733         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4734         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4735         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4736         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4737         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4738         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4739         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4740         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4741         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4742         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4743         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4744         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4745         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4746         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4747         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4748         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4749         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4750         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4751         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4752         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4753         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4754         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4755         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4756         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4757         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4758         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4759         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4760         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4761         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4762         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4763         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4764         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4765         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4766         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4767         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4768         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4769         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4770         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4771         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4772         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4773         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4774         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4775         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4776         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4777         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4778         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4779         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4780         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4781         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4782         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4783         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4784         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4785         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4786         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4787         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4788         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4789         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4790         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4791         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4792         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4793         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4794         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4795         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4796         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4797         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4798         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4799         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4800         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4801         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4802         0x00000000, 0x00000000, 0x00000000,
4803 };
4804
4805 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4806         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4807         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4808         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4809         0x00000000, 0x00000000, 0x00000000,
4810 };
4811
4812 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4813         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4814         0x00000000, 0x00000000, 0x00000000,
4815 };
4816
4817 /* tp->lock is held. */
4818 static int tg3_load_tso_firmware(struct tg3 *tp)
4819 {
4820         struct fw_info info;
4821         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4822         int err, i;
4823
4824         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4825                 return 0;
4826
4827         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4828                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4829                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4830                 info.text_data = &tg3Tso5FwText[0];
4831                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4832                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4833                 info.rodata_data = &tg3Tso5FwRodata[0];
4834                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4835                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4836                 info.data_data = &tg3Tso5FwData[0];
4837                 cpu_base = RX_CPU_BASE;
4838                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4839                 cpu_scratch_size = (info.text_len +
4840                                     info.rodata_len +
4841                                     info.data_len +
4842                                     TG3_TSO5_FW_SBSS_LEN +
4843                                     TG3_TSO5_FW_BSS_LEN);
4844         } else {
4845                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4846                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4847                 info.text_data = &tg3TsoFwText[0];
4848                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4849                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4850                 info.rodata_data = &tg3TsoFwRodata[0];
4851                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4852                 info.data_len = TG3_TSO_FW_DATA_LEN;
4853                 info.data_data = &tg3TsoFwData[0];
4854                 cpu_base = TX_CPU_BASE;
4855                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4856                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4857         }
4858
4859         err = tg3_load_firmware_cpu(tp, cpu_base,
4860                                     cpu_scratch_base, cpu_scratch_size,
4861                                     &info);
4862         if (err)
4863                 return err;
4864
4865         /* Now start up the CPU. */
4866         tw32(cpu_base + CPU_STATE, 0xffffffff);
4867         tw32_f(cpu_base + CPU_PC,    info.text_base);
4868
4869         for (i = 0; i < 5; i++) {
4870                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4871                         break;
4872                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4873                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4874                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4875                 udelay(1000);
4876         }
4877         if (i >= 5) {
4878                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
4879                        "CPU PC on %s: is %08x, should be %08x\n",
4880                        tp->dev->name, tr32(cpu_base + CPU_PC),
4881                        info.text_base);
4882                 return -ENODEV;
4883         }
4884         tw32(cpu_base + CPU_STATE, 0xffffffff);
4885         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4886         return 0;
4887 }
4888
4889 #endif /* TG3_TSO_SUPPORT != 0 */
4890
4891 /* tp->lock is held. */
4892 static void __tg3_set_mac_addr(struct tg3 *tp)
4893 {
4894         u32 addr_high, addr_low;
4895         int i;
4896
4897         addr_high = ((tp->dev->dev_addr[0] << 8) |
4898                      tp->dev->dev_addr[1]);
4899         addr_low = ((tp->dev->dev_addr[2] << 24) |
4900                     (tp->dev->dev_addr[3] << 16) |
4901                     (tp->dev->dev_addr[4] <<  8) |
4902                     (tp->dev->dev_addr[5] <<  0));
4903         for (i = 0; i < 4; i++) {
4904                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4905                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4906         }
4907
4908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4909             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4910                 for (i = 0; i < 12; i++) {
4911                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4912                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4913                 }
4914         }
4915
4916         addr_high = (tp->dev->dev_addr[0] +
4917                      tp->dev->dev_addr[1] +
4918                      tp->dev->dev_addr[2] +
4919                      tp->dev->dev_addr[3] +
4920                      tp->dev->dev_addr[4] +
4921                      tp->dev->dev_addr[5]) &
4922                 TX_BACKOFF_SEED_MASK;
4923         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4924 }
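     /* Example (hypothetical address): for dev_addr 00:10:18:01:02:03 the
      * code above programs addr_high = 0x00000010 and addr_low = 0x18010203
      * into each of the four MAC_ADDR_<n> register pairs.
      */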
4925
4926 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4927 {
4928         struct tg3 *tp = netdev_priv(dev);
4929         struct sockaddr *addr = p;
4930
4931         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4932
4933         spin_lock_irq(&tp->lock);
4934         __tg3_set_mac_addr(tp);
4935         spin_unlock_irq(&tp->lock);
4936
4937         return 0;
4938 }
4939
4940 /* tp->lock is held. */
4941 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4942                            dma_addr_t mapping, u32 maxlen_flags,
4943                            u32 nic_addr)
4944 {
4945         tg3_write_mem(tp,
4946                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4947                       ((u64) mapping >> 32));
4948         tg3_write_mem(tp,
4949                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4950                       ((u64) mapping & 0xffffffff));
4951         tg3_write_mem(tp,
4952                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4953                        maxlen_flags);
4954
4955         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4956                 tg3_write_mem(tp,
4957                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4958                               nic_addr);
4959 }
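     /* tg3_set_bdinfo() programs a ring control block in NIC SRAM: the host
      * ring DMA address (high and low halves), the combined
      * max-length/flags word, and, on pre-5705 chips only, the NIC-side
      * ring address.
      */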
4960
4961 static void __tg3_set_rx_mode(struct net_device *);
4962
4963 /* tp->lock is held. */
4964 static int tg3_reset_hw(struct tg3 *tp)
4965 {
4966         u32 val, rdmac_mode;
4967         int i, err, limit;
4968
4969         tg3_disable_ints(tp);
4970
4971         tg3_stop_fw(tp);
4972
4973         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4974
4975         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4976                 err = tg3_abort_hw(tp);
4977                 if (err)
4978                         return err;
4979         }
4980
4981         err = tg3_chip_reset(tp);
4982         if (err)
4983                 return err;
4984
4985         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4986
4987         /* This works around an issue with Athlon chipsets on
4988          * B3 tigon3 silicon.  This bit has no effect on any
4989          * other revision.  But do not set this on PCI Express
4990          * chips.
4991          */
4992         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4993                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4994         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4995
4996         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4997             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4998                 val = tr32(TG3PCI_PCISTATE);
4999                 val |= PCISTATE_RETRY_SAME_DMA;
5000                 tw32(TG3PCI_PCISTATE, val);
5001         }
5002
5003         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5004                 /* Enable some hw fixes.  */
5005                 val = tr32(TG3PCI_MSI_DATA);
5006                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5007                 tw32(TG3PCI_MSI_DATA, val);
5008         }
5009
5010         /* Descriptor ring init may access the NIC SRAM
5011          * area to set up the TX descriptors, so we can
5012          * only do this after the hardware has been
5013          * successfully reset.
5014          */
5015         tg3_init_rings(tp);
5016
5017         /* This value is determined during the probe-time DMA
5018          * engine test, tg3_test_dma().
5019          */
5020         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5021
5022         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5023                           GRC_MODE_4X_NIC_SEND_RINGS |
5024                           GRC_MODE_NO_TX_PHDR_CSUM |
5025                           GRC_MODE_NO_RX_PHDR_CSUM);
5026         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5027         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5028                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5029         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5030                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5031
5032         tw32(GRC_MODE,
5033              tp->grc_mode |
5034              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5035
5036         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
5037         val = tr32(GRC_MISC_CFG);
5038         val &= ~0xff;
5039         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5040         tw32(GRC_MISC_CFG, val);
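/*
 * Editor's note (hedged sketch, not part of the driver): the value 65
 * programmed above implies a divide-by-(N + 1) prescaler, turning the
 * 66 MHz core clock into a 1 MHz timer tick.  A standalone restatement
 * of that arithmetic, with a hypothetical name:
 */
static unsigned int tg3_example_timer_tick_hz(unsigned int core_clock_hz,
                                              unsigned int prescaler)
{
        /* divide-by-(N + 1) behaviour inferred from the 65 / 66 MHz pairing */
        return core_clock_hz / (prescaler + 1);
}
/* tg3_example_timer_tick_hz(66000000, 65) == 1000000 */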
5041
5042         /* Initialize MBUF/DESC pool. */
5043         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5044                 /* Do nothing.  */
5045         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5046                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5047                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5048                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5049                 else
5050                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5051                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5052                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5053         }
5054 #if TG3_TSO_SUPPORT != 0
5055         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5056                 int fw_len;
5057
5058                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5059                           TG3_TSO5_FW_RODATA_LEN +
5060                           TG3_TSO5_FW_DATA_LEN +
5061                           TG3_TSO5_FW_SBSS_LEN +
5062                           TG3_TSO5_FW_BSS_LEN);
5063                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5064                 tw32(BUFMGR_MB_POOL_ADDR,
5065                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5066                 tw32(BUFMGR_MB_POOL_SIZE,
5067                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5068         }
5069 #endif
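/*
 * Editor's note (illustrative only): the fw_len calculation above is the
 * usual round-up-to-a-power-of-two idiom, here rounding the TSO firmware
 * footprint up to a 128-byte boundary before carving up the mbuf pool.
 * A generic helper with the same behaviour, assuming 'align' is a power
 * of two (hypothetical name, not a driver API):
 */
static unsigned long tg3_example_align_up(unsigned long len, unsigned long align)
{
        return (len + (align - 1)) & ~(align - 1);
}
/* tg3_example_align_up(fw_len, 0x80) matches the open-coded expression above. */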
5070
5071         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5072                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5073                      tp->bufmgr_config.mbuf_read_dma_low_water);
5074                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5075                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5076                 tw32(BUFMGR_MB_HIGH_WATER,
5077                      tp->bufmgr_config.mbuf_high_water);
5078         } else {
5079                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5080                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5081                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5082                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5083                 tw32(BUFMGR_MB_HIGH_WATER,
5084                      tp->bufmgr_config.mbuf_high_water_jumbo);
5085         }
5086         tw32(BUFMGR_DMA_LOW_WATER,
5087              tp->bufmgr_config.dma_low_water);
5088         tw32(BUFMGR_DMA_HIGH_WATER,
5089              tp->bufmgr_config.dma_high_water);
5090
5091         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5092         for (i = 0; i < 2000; i++) {
5093                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5094                         break;
5095                 udelay(10);
5096         }
5097         if (i >= 2000) {
5098                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5099                        tp->dev->name);
5100                 return -ENODEV;
5101         }
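/*
 * Editor's note: the loop above budgets roughly 2000 * 10us = 20ms for
 * the buffer manager to report BUFMGR_MODE_ENABLE before bailing out
 * with -ENODEV.  A hedged sketch of the same poll-with-timeout pattern
 * as a helper (hypothetical, not an existing driver function):
 */
static int tg3_example_poll_bit_set(struct tg3 *tp, unsigned long reg,
                                    u32 bit, int tries, int delay_us)
{
        int i;

        for (i = 0; i < tries; i++) {
                if (tr32(reg) & bit)
                        return 0;       /* bit observed set in time */
                udelay(delay_us);
        }
        return -ENODEV;                 /* gave up after tries * delay_us */
}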
5102
5103         /* Setup replenish threshold. */
5104         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5105
5106         /* Initialize TG3_BDINFO's at:
5107          *  RCVDBDI_STD_BD:     standard eth size rx ring
5108          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5109          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5110          *
5111          * like so:
5112          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5113          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5114          *                              ring attribute flags
5115          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5116          *
5117          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5118          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5119          *
5120          * The size of each ring is fixed in the firmware, but the location is
5121          * configurable.
5122          */
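/*
 * Editor's note (hedged illustration): the comment above describes a
 * four-word TG3_BDINFO block.  The hypothetical helpers below merely
 * restate how MAXLEN_FLAGS is packed and how the 64-bit host DMA address
 * is split across the HIGH/LOW register pair; they are not driver APIs.
 */
static u32 tg3_example_bdinfo_maxlen_flags(u32 maxlen, u32 flags)
{
        return (maxlen << BDINFO_FLAGS_MAXLEN_SHIFT) | flags;
}

static void tg3_example_split_dma_addr(dma_addr_t mapping, u32 *hi, u32 *lo)
{
        *hi = (u32) ((u64) mapping >> 32);
        *lo = (u32) ((u64) mapping & 0xffffffff);
}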
5123         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5124              ((u64) tp->rx_std_mapping >> 32));
5125         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5126              ((u64) tp->rx_std_mapping & 0xffffffff));
5127         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5128              NIC_SRAM_RX_BUFFER_DESC);
5129
5130         /* Don't even try to program the JUMBO/MINI buffer descriptor
5131          * configs on 5705 and newer chips.
5132          */
5133         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5134                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5135                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5136         } else {
5137                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5138                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5139
5140                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5141                      BDINFO_FLAGS_DISABLED);
5142
5143                 /* Setup replenish threshold. */
5144                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5145
5146                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5147                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5148                              ((u64) tp->rx_jumbo_mapping >> 32));
5149                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5150                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5151                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5152                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5153                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5154                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5155                 } else {
5156                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5157                              BDINFO_FLAGS_DISABLED);
5158                 }
5159
5160         }
5161
5162         /* There is only one send ring on 5705/5750, no need to explicitly
5163          * disable the others.
5164          */
5165         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5166                 /* Clear out send RCB ring in SRAM. */
5167                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5168                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5169                                       BDINFO_FLAGS_DISABLED);
5170         }
5171
5172         tp->tx_prod = 0;
5173         tp->tx_cons = 0;
5174         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5175         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5176
5177         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5178                        tp->tx_desc_mapping,
5179                        (TG3_TX_RING_SIZE <<
5180                         BDINFO_FLAGS_MAXLEN_SHIFT),
5181                        NIC_SRAM_TX_BUFFER_DESC);
5182
5183         /* There is only one receive return ring on 5705/5750, no need
5184          * to explicitly disable the others.
5185          */
5186         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5187                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5188                      i += TG3_BDINFO_SIZE) {
5189                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5190                                       BDINFO_FLAGS_DISABLED);
5191                 }
5192         }
5193
5194         tp->rx_rcb_ptr = 0;
5195         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5196
5197         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5198                        tp->rx_rcb_mapping,
5199                        (TG3_RX_RCB_RING_SIZE(tp) <<
5200                         BDINFO_FLAGS_MAXLEN_SHIFT),
5201                        0);
5202
5203         tp->rx_std_ptr = tp->rx_pending;
5204         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5205                      tp->rx_std_ptr);
5206
5207         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5208                                                 tp->rx_jumbo_pending : 0;
5209         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5210                      tp->rx_jumbo_ptr);
5211
5212         /* Initialize MAC address and backoff seed. */
5213         __tg3_set_mac_addr(tp);
5214
5215         /* MTU + ethernet header + FCS + optional VLAN tag */
5216         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
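/*
 * Editor's note: the "+ 8" above is 4 bytes of FCS plus 4 bytes of
 * optional 802.1Q tag, so a standard 1500-byte MTU with ETH_HLEN == 14
 * programs MAC_RX_MTU_SIZE to 1522.  Hypothetical restatement:
 */
static u32 tg3_example_rx_mtu_size(u32 mtu)
{
        return mtu + ETH_HLEN + 4 /* FCS */ + 4 /* VLAN tag */;
}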
5217
5218         /* The slot time is changed by tg3_setup_phy if we
5219          * run at gigabit with half duplex.
5220          */
5221         tw32(MAC_TX_LENGTHS,
5222              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5223              (6 << TX_LENGTHS_IPG_SHIFT) |
5224              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5225
5226         /* Receive rules. */
5227         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5228         tw32(RCVLPC_CONFIG, 0x0181);
5229
5230         /* Calculate RDMAC_MODE setting early, we need it to determine
5231          * the RCVLPC_STATE_ENABLE mask.
5232          */
5233         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5234                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5235                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5236                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5237                       RDMAC_MODE_LNGREAD_ENAB);
5238         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5239                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5240         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
5241              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
5242                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5243                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5244                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5245                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5246                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5247                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5248                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5249                 }
5250         }
5251
5252 #if TG3_TSO_SUPPORT != 0
5253         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5254                 rdmac_mode |= (1 << 27);
5255 #endif
5256
5257         /* Receive/send statistics. */
5258         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5259             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5260                 val = tr32(RCVLPC_STATS_ENABLE);
5261                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5262                 tw32(RCVLPC_STATS_ENABLE, val);
5263         } else {
5264                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5265         }
5266         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5267         tw32(SNDDATAI_STATSENAB, 0xffffff);
5268         tw32(SNDDATAI_STATSCTRL,
5269              (SNDDATAI_SCTRL_ENABLE |
5270               SNDDATAI_SCTRL_FASTUPD));
5271
5272         /* Setup host coalescing engine. */
5273         tw32(HOSTCC_MODE, 0);
5274         for (i = 0; i < 2000; i++) {
5275                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5276                         break;
5277                 udelay(10);
5278         }
5279
5280         tw32(HOSTCC_RXCOL_TICKS, 0);
5281         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5282         tw32(HOSTCC_RXMAX_FRAMES, 1);
5283         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5284         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5285                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5286                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5287         }
5288         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5289         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5290
5291         /* set status block DMA address */
5292         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5293              ((u64) tp->status_mapping >> 32));
5294         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5295              ((u64) tp->status_mapping & 0xffffffff));
5296
5297         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5298                 /* Status/statistics block address.  See tg3_timer,
5299                  * the tg3_periodic_fetch_stats call there, and
5300                  * tg3_get_stats to see how this works for 5705/5750 chips.
5301                  */
5302                 tw32(HOSTCC_STAT_COAL_TICKS,
5303                      DEFAULT_STAT_COAL_TICKS);
5304                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5305                      ((u64) tp->stats_mapping >> 32));
5306                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5307                      ((u64) tp->stats_mapping & 0xffffffff));
5308                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5309                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5310         }
5311
5312         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5313
5314         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5315         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5316         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5317                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5318
5319         /* Clear statistics/status block in chip, and status block in ram. */
5320         for (i = NIC_SRAM_STATS_BLK;
5321              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5322              i += sizeof(u32)) {
5323                 tg3_write_mem(tp, i, 0);
5324                 udelay(40);
5325         }
5326         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5327
5328         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5329                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5330         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5331         udelay(40);
5332
5333         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5334         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5335                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5336                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5337         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5338         udelay(100);
5339
5340         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5341         tr32(MAILBOX_INTERRUPT_0);
5342
5343         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5344                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5345                 udelay(40);
5346         }
5347
5348         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5349                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5350                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5351                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5352                WDMAC_MODE_LNGREAD_ENAB);
5353
5354         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
5355              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
5356                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5357                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5358                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5359                         /* nothing */
5360                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5361                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5362                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5363                         val |= WDMAC_MODE_RX_ACCEL;
5364                 }
5365         }
5366
5367         tw32_f(WDMAC_MODE, val);
5368         udelay(40);
5369
5370         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5371                 val = tr32(TG3PCI_X_CAPS);
5372                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5373                         val &= ~PCIX_CAPS_BURST_MASK;
5374                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5375                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5376                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5377                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5378                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5379                                 val |= (tp->split_mode_max_reqs <<
5380                                         PCIX_CAPS_SPLIT_SHIFT);
5381                 }
5382                 tw32(TG3PCI_X_CAPS, val);
5383         }
5384
5385         tw32_f(RDMAC_MODE, rdmac_mode);
5386         udelay(40);
5387
5388         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5389         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5390                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5391         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5392         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5393         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5394         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5395         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5396 #if TG3_TSO_SUPPORT != 0
5397         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5398                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5399 #endif
5400         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5401         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5402
5403         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5404                 err = tg3_load_5701_a0_firmware_fix(tp);
5405                 if (err)
5406                         return err;
5407         }
5408
5409 #if TG3_TSO_SUPPORT != 0
5410         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5411                 err = tg3_load_tso_firmware(tp);
5412                 if (err)
5413                         return err;
5414         }
5415 #endif
5416
5417         tp->tx_mode = TX_MODE_ENABLE;
5418         tw32_f(MAC_TX_MODE, tp->tx_mode);
5419         udelay(100);
5420
5421         tp->rx_mode = RX_MODE_ENABLE;
5422         tw32_f(MAC_RX_MODE, tp->rx_mode);
5423         udelay(10);
5424
5425         if (tp->link_config.phy_is_low_power) {
5426                 tp->link_config.phy_is_low_power = 0;
5427                 tp->link_config.speed = tp->link_config.orig_speed;
5428                 tp->link_config.duplex = tp->link_config.orig_duplex;
5429                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5430         }
5431
5432         tp->mi_mode = MAC_MI_MODE_BASE;
5433         tw32_f(MAC_MI_MODE, tp->mi_mode);
5434         udelay(80);
5435
5436         tw32(MAC_LED_CTRL, tp->led_ctrl);
5437
5438         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5439         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5440                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5441                 udelay(10);
5442         }
5443         tw32_f(MAC_RX_MODE, tp->rx_mode);
5444         udelay(10);
5445
5446         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5447                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5448                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5449                         /* Set drive transmission level to 1.2V  */
5450                         /* only if the signal pre-emphasis bit is not set  */
5451                         val = tr32(MAC_SERDES_CFG);
5452                         val &= 0xfffff000;
5453                         val |= 0x880;
5454                         tw32(MAC_SERDES_CFG, val);
5455                 }
5456                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5457                         tw32(MAC_SERDES_CFG, 0x616000);
5458         }
5459
5460         /* Prevent chip from dropping frames when flow control
5461          * is enabled.
5462          */
5463         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5464
5465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5466             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5467                 /* Use hardware link auto-negotiation */
5468                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5469         }
5470
5471         err = tg3_setup_phy(tp, 1);
5472         if (err)
5473                 return err;
5474
5475         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5476                 u32 tmp;
5477
5478                 /* Clear CRC stats. */
5479                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5480                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5481                         tg3_readphy(tp, 0x14, &tmp);
5482                 }
5483         }
5484
5485         __tg3_set_rx_mode(tp->dev);
5486
5487         /* Initialize receive rules. */
5488         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5489         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5490         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5491         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5492
5493         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5494                 limit = 8;
5495         else
5496                 limit = 16;
5497         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5498                 limit -= 4;
5499         switch (limit) {
5500         case 16:
5501                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5502         case 15:
5503                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5504         case 14:
5505                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5506         case 13:
5507                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5508         case 12:
5509                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5510         case 11:
5511                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5512         case 10:
5513                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5514         case 9:
5515                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5516         case 8:
5517                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5518         case 7:
5519                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5520         case 6:
5521                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5522         case 5:
5523                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5524         case 4:
5525                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5526         case 3:
5527                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5528         case 2:
5529         case 1:
5530
5531         default:
5532                 break;
5533         }
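/*
 * Editor's note (sketch only, not a replacement): the fall-through switch
 * above clears every rule/value pair from (limit - 1) down to 4 and leaves
 * rules 0-3 untouched.  Assuming the MAC_RCV_RULE_n / MAC_RCV_VALUE_n
 * registers sit at a fixed 8-byte stride from MAC_RCV_RULE_0 (an
 * assumption this sketch does not verify), an equivalent loop would be:
 *
 *      for (i = limit - 1; i >= 4; i--) {
 *              tw32(MAC_RCV_RULE_0 + i * 8, 0);
 *              tw32(MAC_RCV_VALUE_0 + i * 8, 0);
 *      }
 */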
5534
5535         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5536
5537         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5538                 tg3_enable_ints(tp);
5539
5540         return 0;
5541 }
5542
5543 /* Called at device open time to get the chip ready for
5544  * packet processing.  Invoked with tp->lock held.
5545  */
5546 static int tg3_init_hw(struct tg3 *tp)
5547 {
5548         int err;
5549
5550         /* Force the chip into D0. */
5551         err = tg3_set_power_state(tp, 0);
5552         if (err)
5553                 goto out;
5554
5555         tg3_switch_clocks(tp);
5556
5557         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5558
5559         err = tg3_reset_hw(tp);
5560
5561 out:
5562         return err;
5563 }
5564
5565 #define TG3_STAT_ADD32(PSTAT, REG) \
5566 do {    u32 __val = tr32(REG); \
5567         (PSTAT)->low += __val; \
5568         if ((PSTAT)->low < __val) \
5569                 (PSTAT)->high += 1; \
5570 } while (0)
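/*
 * Editor's note (standalone illustration): the MAC statistics registers
 * are only 32 bits wide, so the macro above folds each read into a
 * software 64-bit counter and detects wrap of the low word by checking
 * whether the addition made it smaller.  Equivalent logic with plain
 * types and hypothetical names:
 */
struct tg3_example_stat64 {
        u32 high, low;
};

static void tg3_example_stat_add32(struct tg3_example_stat64 *stat, u32 val)
{
        stat->low += val;
        if (stat->low < val)    /* the 32-bit add wrapped, carry into high */
                stat->high += 1;
}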
5571
5572 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5573 {
5574         struct tg3_hw_stats *sp = tp->hw_stats;
5575
5576         if (!netif_carrier_ok(tp->dev))
5577                 return;
5578
5579         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5580         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5581         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5582         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5583         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5584         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5585         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5586         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5587         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5588         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5589         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5590         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5591         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5592
5593         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5594         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5595         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5596         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5597         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5598         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5599         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5600         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5601         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5602         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5603         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5604         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5605         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5606         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5607 }
5608
5609 static void tg3_timer(unsigned long __opaque)
5610 {
5611         struct tg3 *tp = (struct tg3 *) __opaque;
5612         unsigned long flags;
5613
5614         spin_lock_irqsave(&tp->lock, flags);
5615         spin_lock(&tp->tx_lock);
5616
5617         /* All of this garbage is needed because, when using
5618          * non-tagged IRQ status, the mailbox/status_block protocol
5619          * the chip uses with the CPU is race prone.
5620          */
5621         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5622                 tw32(GRC_LOCAL_CTRL,
5623                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5624         } else {
5625                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5626                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5627         }
5628
5629         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5630                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5631                 spin_unlock(&tp->tx_lock);
5632                 spin_unlock_irqrestore(&tp->lock, flags);
5633                 schedule_work(&tp->reset_task);
5634                 return;
5635         }
5636
5637         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5638                 tg3_periodic_fetch_stats(tp);
5639
5640         /* This part only runs once per second. */
5641         if (!--tp->timer_counter) {
5642                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5643                         u32 mac_stat;
5644                         int phy_event;
5645
5646                         mac_stat = tr32(MAC_STATUS);
5647
5648                         phy_event = 0;
5649                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5650                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5651                                         phy_event = 1;
5652                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5653                                 phy_event = 1;
5654
5655                         if (phy_event)
5656                                 tg3_setup_phy(tp, 0);
5657                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5658                         u32 mac_stat = tr32(MAC_STATUS);
5659                         int need_setup = 0;
5660
5661                         if (netif_carrier_ok(tp->dev) &&
5662                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5663                                 need_setup = 1;
5664                         }
5665                         if (!netif_carrier_ok(tp->dev) &&
5666                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5667                                          MAC_STATUS_SIGNAL_DET))) {
5668                                 need_setup = 1;
5669                         }
5670                         if (need_setup) {
5671                                 tw32_f(MAC_MODE,
5672                                      (tp->mac_mode &
5673                                       ~MAC_MODE_PORT_MODE_MASK));
5674                                 udelay(40);
5675                                 tw32_f(MAC_MODE, tp->mac_mode);
5676                                 udelay(40);
5677                                 tg3_setup_phy(tp, 0);
5678                         }
5679                 }
5680
5681                 tp->timer_counter = tp->timer_multiplier;
5682         }
5683
5684         /* Heartbeat is only sent once every 120 seconds.  */
5685         if (!--tp->asf_counter) {
5686                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5687                         u32 val;
5688
5689                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5690                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5691                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5692                         val = tr32(GRC_RX_CPU_EVENT);
5693                         val |= (1 << 14);
5694                         tw32(GRC_RX_CPU_EVENT, val);
5695                 }
5696                 tp->asf_counter = tp->asf_multiplier;
5697         }
5698
5699         spin_unlock(&tp->tx_lock);
5700         spin_unlock_irqrestore(&tp->lock, flags);
5701
5702         tp->timer.expires = jiffies + tp->timer_offset;
5703         add_timer(&tp->timer);
5704 }
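/*
 * Editor's note: with the values programmed in tg3_open() below
 * (timer_offset = HZ / 10, timer_counter = timer_multiplier = 10,
 * asf_counter = asf_multiplier = 10 * 120), this timer fires ten times a
 * second, the "once per second" block runs every 10th expiration, and the
 * ASF heartbeat goes out every 1200th expiration, i.e. every 120 seconds,
 * matching the comments above.  Hypothetical restatement of the arithmetic:
 */
static unsigned int tg3_example_asf_period_secs(unsigned int fires_per_sec,
                                                unsigned int asf_multiplier)
{
        return asf_multiplier / fires_per_sec;  /* (10 * 120) / 10 == 120 */
}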
5705
5706 static int tg3_open(struct net_device *dev)
5707 {
5708         struct tg3 *tp = netdev_priv(dev);
5709         int err;
5710
5711         spin_lock_irq(&tp->lock);
5712         spin_lock(&tp->tx_lock);
5713
5714         tg3_disable_ints(tp);
5715         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5716
5717         spin_unlock(&tp->tx_lock);
5718         spin_unlock_irq(&tp->lock);
5719
5720         /* The placement of this call is tied
5721          * to the setup and use of Host TX descriptors.
5722          */
5723         err = tg3_alloc_consistent(tp);
5724         if (err)
5725                 return err;
5726
5727         err = request_irq(dev->irq, tg3_interrupt,
5728                           SA_SHIRQ, dev->name, dev);
5729
5730         if (err) {
5731                 tg3_free_consistent(tp);
5732                 return err;
5733         }
5734
5735         spin_lock_irq(&tp->lock);
5736         spin_lock(&tp->tx_lock);
5737
5738         err = tg3_init_hw(tp);
5739         if (err) {
5740                 tg3_halt(tp);
5741                 tg3_free_rings(tp);
5742         } else {
5743                 tp->timer_offset = HZ / 10;
5744                 tp->timer_counter = tp->timer_multiplier = 10;
5745                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5746
5747                 init_timer(&tp->timer);
5748                 tp->timer.expires = jiffies + tp->timer_offset;
5749                 tp->timer.data = (unsigned long) tp;
5750                 tp->timer.function = tg3_timer;
5751                 add_timer(&tp->timer);
5752
5753                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5754         }
5755
5756         spin_unlock(&tp->tx_lock);
5757         spin_unlock_irq(&tp->lock);
5758
5759         if (err) {
5760                 free_irq(dev->irq, dev);
5761                 tg3_free_consistent(tp);
5762                 return err;
5763         }
5764
5765         spin_lock_irq(&tp->lock);
5766         spin_lock(&tp->tx_lock);
5767
5768         tg3_enable_ints(tp);
5769
5770         spin_unlock(&tp->tx_lock);
5771         spin_unlock_irq(&tp->lock);
5772
5773         netif_start_queue(dev);
5774
5775         return 0;
5776 }
5777
5778 #if 0
5779 /*static*/ void tg3_dump_state(struct tg3 *tp)
5780 {
5781         u32 val32, val32_2, val32_3, val32_4, val32_5;
5782         u16 val16;
5783         int i;
5784
5785         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5786         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5787         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5788                val16, val32);
5789
5790         /* MAC block */
5791         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5792                tr32(MAC_MODE), tr32(MAC_STATUS));
5793         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5794                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5795         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5796                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5797         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5798                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5799
5800         /* Send data initiator control block */
5801         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5802                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5803         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5804                tr32(SNDDATAI_STATSCTRL));
5805
5806         /* Send data completion control block */
5807         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5808
5809         /* Send BD ring selector block */
5810         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5811                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5812
5813         /* Send BD initiator control block */
5814         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5815                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5816
5817         /* Send BD completion control block */
5818         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5819
5820         /* Receive list placement control block */
5821         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5822                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5823         printk("       RCVLPC_STATSCTRL[%08x]\n",
5824                tr32(RCVLPC_STATSCTRL));
5825
5826         /* Receive data and receive BD initiator control block */
5827         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5828                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5829
5830         /* Receive data completion control block */
5831         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5832                tr32(RCVDCC_MODE));
5833
5834         /* Receive BD initiator control block */
5835         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5836                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5837
5838         /* Receive BD completion control block */
5839         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5840                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5841
5842         /* Receive list selector control block */
5843         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5844                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5845
5846         /* Mbuf cluster free block */
5847         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5848                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5849
5850         /* Host coalescing control block */
5851         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5852                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5853         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5854                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5855                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5856         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5857                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5858                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5859         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5860                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5861         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5862                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5863
5864         /* Memory arbiter control block */
5865         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5866                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5867
5868         /* Buffer manager control block */
5869         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5870                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5871         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5872                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5873         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5874                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5875                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5876                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5877
5878         /* Read DMA control block */
5879         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5880                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5881
5882         /* Write DMA control block */
5883         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5884                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5885
5886         /* DMA completion block */
5887         printk("DEBUG: DMAC_MODE[%08x]\n",
5888                tr32(DMAC_MODE));
5889
5890         /* GRC block */
5891         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5892                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5893         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5894                tr32(GRC_LOCAL_CTRL));
5895
5896         /* TG3_BDINFOs */
5897         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5898                tr32(RCVDBDI_JUMBO_BD + 0x0),
5899                tr32(RCVDBDI_JUMBO_BD + 0x4),
5900                tr32(RCVDBDI_JUMBO_BD + 0x8),
5901                tr32(RCVDBDI_JUMBO_BD + 0xc));
5902         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5903                tr32(RCVDBDI_STD_BD + 0x0),
5904                tr32(RCVDBDI_STD_BD + 0x4),
5905                tr32(RCVDBDI_STD_BD + 0x8),
5906                tr32(RCVDBDI_STD_BD + 0xc));
5907         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5908                tr32(RCVDBDI_MINI_BD + 0x0),
5909                tr32(RCVDBDI_MINI_BD + 0x4),
5910                tr32(RCVDBDI_MINI_BD + 0x8),
5911                tr32(RCVDBDI_MINI_BD + 0xc));
5912
5913         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5914         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5915         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5916         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5917         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5918                val32, val32_2, val32_3, val32_4);
5919
5920         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5921         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5922         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5923         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5924         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5925                val32, val32_2, val32_3, val32_4);
5926
5927         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5928         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5929         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5930         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5931         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5932         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5933                val32, val32_2, val32_3, val32_4, val32_5);
5934
5935         /* SW status block */
5936         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5937                tp->hw_status->status,
5938                tp->hw_status->status_tag,
5939                tp->hw_status->rx_jumbo_consumer,
5940                tp->hw_status->rx_consumer,
5941                tp->hw_status->rx_mini_consumer,
5942                tp->hw_status->idx[0].rx_producer,
5943                tp->hw_status->idx[0].tx_consumer);
5944
5945         /* SW statistics block */
5946         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5947                ((u32 *)tp->hw_stats)[0],
5948                ((u32 *)tp->hw_stats)[1],
5949                ((u32 *)tp->hw_stats)[2],
5950                ((u32 *)tp->hw_stats)[3]);
5951
5952         /* Mailboxes */
5953         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5954                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5955                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5956                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5957                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5958
5959         /* NIC side send descriptors. */
5960         for (i = 0; i < 6; i++) {
5961                 unsigned long txd;
5962
5963                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5964                         + (i * sizeof(struct tg3_tx_buffer_desc));
5965                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5966                        i,
5967                        readl(txd + 0x0), readl(txd + 0x4),
5968                        readl(txd + 0x8), readl(txd + 0xc));
5969         }
5970
5971         /* NIC side RX descriptors. */
5972         for (i = 0; i < 6; i++) {
5973                 unsigned long rxd;
5974
5975                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5976                         + (i * sizeof(struct tg3_rx_buffer_desc));
5977                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5978                        i,
5979                        readl(rxd + 0x0), readl(rxd + 0x4),
5980                        readl(rxd + 0x8), readl(rxd + 0xc));
5981                 rxd += (4 * sizeof(u32));
5982                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5983                        i,
5984                        readl(rxd + 0x0), readl(rxd + 0x4),
5985                        readl(rxd + 0x8), readl(rxd + 0xc));
5986         }
5987
5988         for (i = 0; i < 6; i++) {
5989                 unsigned long rxd;
5990
5991                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5992                         + (i * sizeof(struct tg3_rx_buffer_desc));
5993                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5994                        i,
5995                        readl(rxd + 0x0), readl(rxd + 0x4),
5996                        readl(rxd + 0x8), readl(rxd + 0xc));
5997                 rxd += (4 * sizeof(u32));
5998                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5999                        i,
6000                        readl(rxd + 0x0), readl(rxd + 0x4),
6001                        readl(rxd + 0x8), readl(rxd + 0xc));
6002         }
6003 }
6004 #endif
6005
6006 static struct net_device_stats *tg3_get_stats(struct net_device *);
6007 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6008
6009 static int tg3_close(struct net_device *dev)
6010 {
6011         struct tg3 *tp = netdev_priv(dev);
6012
6013         netif_stop_queue(dev);
6014
6015         del_timer_sync(&tp->timer);
6016
6017         spin_lock_irq(&tp->lock);
6018         spin_lock(&tp->tx_lock);
6019 #if 0
6020         tg3_dump_state(tp);
6021 #endif
6022
6023         tg3_disable_ints(tp);
6024
6025         tg3_halt(tp);
6026         tg3_free_rings(tp);
6027         tp->tg3_flags &=
6028                 ~(TG3_FLAG_INIT_COMPLETE |
6029                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6030         netif_carrier_off(tp->dev);
6031
6032         spin_unlock(&tp->tx_lock);
6033         spin_unlock_irq(&tp->lock);
6034
6035         free_irq(dev->irq, dev);
6036
6037         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6038                sizeof(tp->net_stats_prev));
6039         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6040                sizeof(tp->estats_prev));
6041
6042         tg3_free_consistent(tp);
6043
6044         return 0;
6045 }
6046
6047 static inline unsigned long get_stat64(tg3_stat64_t *val)
6048 {
6049         unsigned long ret;
6050
6051 #if (BITS_PER_LONG == 32)
6052         ret = val->low;
6053 #else
6054         ret = ((u64)val->high << 32) | ((u64)val->low);
6055 #endif
6056         return ret;
6057 }
6058
6059 static unsigned long calc_crc_errors(struct tg3 *tp)
6060 {
6061         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6062
6063         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6064             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6065              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6066                 unsigned long flags;
6067                 u32 val;
6068
6069                 spin_lock_irqsave(&tp->lock, flags);
6070                 if (!tg3_readphy(tp, 0x1e, &val)) {
6071                         tg3_writephy(tp, 0x1e, val | 0x8000);
6072                         tg3_readphy(tp, 0x14, &val);
6073                 } else
6074                         val = 0;
6075                 spin_unlock_irqrestore(&tp->lock, flags);
6076
6077                 tp->phy_crc_errors += val;
6078
6079                 return tp->phy_crc_errors;
6080         }
6081
6082         return get_stat64(&hw_stats->rx_fcs_errors);
6083 }
6084
6085 #define ESTAT_ADD(member) \
6086         estats->member =        old_estats->member + \
6087                                 get_stat64(&hw_stats->member)
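/*
 * Editor's note: tg3_close() above snapshots the running totals into
 * tp->estats_prev, so ESTAT_ADD() reports everything counted before the
 * last close plus whatever the hardware has accumulated since, letting
 * the totals survive an ifdown/ifup cycle.  Expanded for one field it
 * reads roughly:
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 */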
6088
6089 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6090 {
6091         struct tg3_ethtool_stats *estats = &tp->estats;
6092         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6093         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6094
6095         if (!hw_stats)
6096                 return old_estats;
6097
6098         ESTAT_ADD(rx_octets);
6099         ESTAT_ADD(rx_fragments);
6100         ESTAT_ADD(rx_ucast_packets);
6101         ESTAT_ADD(rx_mcast_packets);
6102         ESTAT_ADD(rx_bcast_packets);
6103         ESTAT_ADD(rx_fcs_errors);
6104         ESTAT_ADD(rx_align_errors);
6105         ESTAT_ADD(rx_xon_pause_rcvd);
6106         ESTAT_ADD(rx_xoff_pause_rcvd);
6107         ESTAT_ADD(rx_mac_ctrl_rcvd);
6108         ESTAT_ADD(rx_xoff_entered);
6109         ESTAT_ADD(rx_frame_too_long_errors);
6110         ESTAT_ADD(rx_jabbers);
6111         ESTAT_ADD(rx_undersize_packets);
6112         ESTAT_ADD(rx_in_length_errors);
6113         ESTAT_ADD(rx_out_length_errors);
6114         ESTAT_ADD(rx_64_or_less_octet_packets);
6115         ESTAT_ADD(rx_65_to_127_octet_packets);
6116         ESTAT_ADD(rx_128_to_255_octet_packets);
6117         ESTAT_ADD(rx_256_to_511_octet_packets);
6118         ESTAT_ADD(rx_512_to_1023_octet_packets);
6119         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6120         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6121         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6122         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6123         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6124
6125         ESTAT_ADD(tx_octets);
6126         ESTAT_ADD(tx_collisions);
6127         ESTAT_ADD(tx_xon_sent);
6128         ESTAT_ADD(tx_xoff_sent);
6129         ESTAT_ADD(tx_flow_control);
6130         ESTAT_ADD(tx_mac_errors);
6131         ESTAT_ADD(tx_single_collisions);
6132         ESTAT_ADD(tx_mult_collisions);
6133         ESTAT_ADD(tx_deferred);
6134         ESTAT_ADD(tx_excessive_collisions);
6135         ESTAT_ADD(tx_late_collisions);
6136         ESTAT_ADD(tx_collide_2times);
6137         ESTAT_ADD(tx_collide_3times);
6138         ESTAT_ADD(tx_collide_4times);
6139         ESTAT_ADD(tx_collide_5times);
6140         ESTAT_ADD(tx_collide_6times);
6141         ESTAT_ADD(tx_collide_7times);
6142         ESTAT_ADD(tx_collide_8times);
6143         ESTAT_ADD(tx_collide_9times);
6144         ESTAT_ADD(tx_collide_10times);
6145         ESTAT_ADD(tx_collide_11times);
6146         ESTAT_ADD(tx_collide_12times);
6147         ESTAT_ADD(tx_collide_13times);
6148         ESTAT_ADD(tx_collide_14times);
6149         ESTAT_ADD(tx_collide_15times);
6150         ESTAT_ADD(tx_ucast_packets);
6151         ESTAT_ADD(tx_mcast_packets);
6152         ESTAT_ADD(tx_bcast_packets);
6153         ESTAT_ADD(tx_carrier_sense_errors);
6154         ESTAT_ADD(tx_discards);
6155         ESTAT_ADD(tx_errors);
6156
6157         ESTAT_ADD(dma_writeq_full);
6158         ESTAT_ADD(dma_write_prioq_full);
6159         ESTAT_ADD(rxbds_empty);
6160         ESTAT_ADD(rx_discards);
6161         ESTAT_ADD(rx_errors);
6162         ESTAT_ADD(rx_threshold_hit);
6163
6164         ESTAT_ADD(dma_readq_full);
6165         ESTAT_ADD(dma_read_prioq_full);
6166         ESTAT_ADD(tx_comp_queue_full);
6167
6168         ESTAT_ADD(ring_set_send_prod_index);
6169         ESTAT_ADD(ring_status_update);
6170         ESTAT_ADD(nic_irqs);
6171         ESTAT_ADD(nic_avoided_irqs);
6172         ESTAT_ADD(nic_tx_threshold_hit);
6173
6174         return estats;
6175 }
6176
6177 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6178 {
6179         struct tg3 *tp = netdev_priv(dev);
6180         struct net_device_stats *stats = &tp->net_stats;
6181         struct net_device_stats *old_stats = &tp->net_stats_prev;
6182         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6183
6184         if (!hw_stats)
6185                 return old_stats;
6186
6187         stats->rx_packets = old_stats->rx_packets +
6188                 get_stat64(&hw_stats->rx_ucast_packets) +
6189                 get_stat64(&hw_stats->rx_mcast_packets) +
6190                 get_stat64(&hw_stats->rx_bcast_packets);
6191
6192         stats->tx_packets = old_stats->tx_packets +
6193                 get_stat64(&hw_stats->tx_ucast_packets) +
6194                 get_stat64(&hw_stats->tx_mcast_packets) +
6195                 get_stat64(&hw_stats->tx_bcast_packets);
6196
6197         stats->rx_bytes = old_stats->rx_bytes +
6198                 get_stat64(&hw_stats->rx_octets);
6199         stats->tx_bytes = old_stats->tx_bytes +
6200                 get_stat64(&hw_stats->tx_octets);
6201
6202         stats->rx_errors = old_stats->rx_errors +
6203                 get_stat64(&hw_stats->rx_errors) +
6204                 get_stat64(&hw_stats->rx_discards);
6205         stats->tx_errors = old_stats->tx_errors +
6206                 get_stat64(&hw_stats->tx_errors) +
6207                 get_stat64(&hw_stats->tx_mac_errors) +
6208                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6209                 get_stat64(&hw_stats->tx_discards);
6210
6211         stats->multicast = old_stats->multicast +
6212                 get_stat64(&hw_stats->rx_mcast_packets);
6213         stats->collisions = old_stats->collisions +
6214                 get_stat64(&hw_stats->tx_collisions);
6215
6216         stats->rx_length_errors = old_stats->rx_length_errors +
6217                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6218                 get_stat64(&hw_stats->rx_undersize_packets);
6219
6220         stats->rx_over_errors = old_stats->rx_over_errors +
6221                 get_stat64(&hw_stats->rxbds_empty);
6222         stats->rx_frame_errors = old_stats->rx_frame_errors +
6223                 get_stat64(&hw_stats->rx_align_errors);
6224         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6225                 get_stat64(&hw_stats->tx_discards);
6226         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6227                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6228
6229         stats->rx_crc_errors = old_stats->rx_crc_errors +
6230                 calc_crc_errors(tp);
6231
6232         return stats;
6233 }
6234
6235 static inline u32 calc_crc(unsigned char *buf, int len)
6236 {
6237         u32 reg;
6238         u32 tmp;
6239         int j, k;
6240
6241         reg = 0xffffffff;
6242
6243         for (j = 0; j < len; j++) {
6244                 reg ^= buf[j];
6245
6246                 for (k = 0; k < 8; k++) {
6247                         tmp = reg & 0x01;
6248
6249                         reg >>= 1;
6250
6251                         if (tmp) {
6252                                 reg ^= 0xedb88320;
6253                         }
6254                 }
6255         }
6256
6257         return ~reg;
6258 }
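/*
 * Editor's note (standalone, compiles outside the driver): calc_crc()
 * above is the standard reflected CRC-32 (polynomial 0xedb88320) that the
 * multicast hash below is built from.  A self-contained copy with a
 * well-known test vector (CRC-32 of "123456789" is 0xcbf43926):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t example_crc32(const unsigned char *buf, int len)
{
        uint32_t reg = 0xffffffff;
        int j, k;

        for (j = 0; j < len; j++) {
                reg ^= buf[j];
                for (k = 0; k < 8; k++)
                        reg = (reg >> 1) ^ ((reg & 1) ? 0xedb88320 : 0);
        }
        return ~reg;
}

int main(void)
{
        const char *s = "123456789";

        printf("crc32 = %08x\n", example_crc32((const unsigned char *) s,
                                               (int) strlen(s)));
        return 0;       /* prints crc32 = cbf43926 */
}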
6259
6260 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6261 {
6262         /* accept or reject all multicast frames */
6263         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6264         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6265         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6266         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6267 }
6268
6269 static void __tg3_set_rx_mode(struct net_device *dev)
6270 {
6271         struct tg3 *tp = netdev_priv(dev);
6272         u32 rx_mode;
6273
6274         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6275                                   RX_MODE_KEEP_VLAN_TAG);
6276
6277         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6278          * flag clear.
6279          */
6280 #if TG3_VLAN_TAG_USED
6281         if (!tp->vlgrp &&
6282             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6283                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6284 #else
6285         /* By definition, VLAN is always disabled in this
6286          * case.
6287          */
6288         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6289                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6290 #endif
6291
6292         if (dev->flags & IFF_PROMISC) {
6293                 /* Promiscuous mode. */
6294                 rx_mode |= RX_MODE_PROMISC;
6295         } else if (dev->flags & IFF_ALLMULTI) {
6296                 /* Accept all multicast. */
6297                 tg3_set_multi(tp, 1);
6298         } else if (dev->mc_count < 1) {
6299                 /* Reject all multicast. */
6300                 tg3_set_multi(tp, 0);
6301         } else {
6302                 /* Accept one or more multicast(s). */
6303                 struct dev_mc_list *mclist;
6304                 unsigned int i;
6305                 u32 mc_filter[4] = { 0, };
6306                 u32 regidx;
6307                 u32 bit;
6308                 u32 crc;
6309
6310                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6311                      i++, mclist = mclist->next) {
6312
6313                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
6314                         bit = ~crc & 0x7f;
6315                         regidx = (bit & 0x60) >> 5;
6316                         bit &= 0x1f;
6317                         mc_filter[regidx] |= (1 << bit);
6318                 }
6319
6320                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6321                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6322                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6323                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6324         }
6325
6326         if (rx_mode != tp->rx_mode) {
6327                 tp->rx_mode = rx_mode;
6328                 tw32_f(MAC_RX_MODE, rx_mode);
6329                 udelay(10);
6330         }
6331 }
6332
6333 static void tg3_set_rx_mode(struct net_device *dev)
6334 {
6335         struct tg3 *tp = netdev_priv(dev);
6336
6337         spin_lock_irq(&tp->lock);
6338         spin_lock(&tp->tx_lock);
6339         __tg3_set_rx_mode(dev);
6340         spin_unlock(&tp->tx_lock);
6341         spin_unlock_irq(&tp->lock);
6342 }
6343
6344 #define TG3_REGDUMP_LEN         (32 * 1024)
6345
6346 static int tg3_get_regs_len(struct net_device *dev)
6347 {
6348         return TG3_REGDUMP_LEN;
6349 }
6350
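     /* Dump roughly 32K of register space for ethtool -d.  Each contiguous
      * block of registers is copied to the same offset inside the output
      * buffer, so unread gaps stay zeroed from the memset() below.
      */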
6351 static void tg3_get_regs(struct net_device *dev,
6352                 struct ethtool_regs *regs, void *_p)
6353 {
6354         u32 *p = _p;
6355         struct tg3 *tp = netdev_priv(dev);
6356         u8 *orig_p = _p;
6357         int i;
6358
6359         regs->version = 0;
6360
6361         memset(p, 0, TG3_REGDUMP_LEN);
6362
6363         spin_lock_irq(&tp->lock);
6364         spin_lock(&tp->tx_lock);
6365
6366 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6367 #define GET_REG32_LOOP(base,len)                \
6368 do {    p = (u32 *)(orig_p + (base));           \
6369         for (i = 0; i < len; i += 4)            \
6370                 __GET_REG32((base) + i);        \
6371 } while (0)
6372 #define GET_REG32_1(reg)                        \
6373 do {    p = (u32 *)(orig_p + (reg));            \
6374         __GET_REG32((reg));                     \
6375 } while (0)
6376
6377         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6378         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6379         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6380         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6381         GET_REG32_1(SNDDATAC_MODE);
6382         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6383         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6384         GET_REG32_1(SNDBDC_MODE);
6385         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6386         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6387         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6388         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6389         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6390         GET_REG32_1(RCVDCC_MODE);
6391         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6392         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6393         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6394         GET_REG32_1(MBFREE_MODE);
6395         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6396         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6397         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6398         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6399         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6400         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6401         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6402         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6403         GET_REG32_LOOP(FTQ_RESET, 0x120);
6404         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6405         GET_REG32_1(DMAC_MODE);
6406         GET_REG32_LOOP(GRC_MODE, 0x4c);
6407         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6408                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6409
6410 #undef __GET_REG32
6411 #undef GET_REG32_LOOP
6412 #undef GET_REG32_1
6413
6414         spin_unlock(&tp->tx_lock);
6415         spin_unlock_irq(&tp->lock);
6416 }
6417
6418 static int tg3_get_eeprom_len(struct net_device *dev)
6419 {
6420         struct tg3 *tp = netdev_priv(dev);
6421
6422         return tp->nvram_size;
6423 }
6424
6425 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6426
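     /* ethtool EEPROM read.  NVRAM is only accessible as 32-bit words, so a
      * leading or trailing partial word is read whole and only the requested
      * bytes are copied out to the caller's buffer.
      */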
6427 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6428 {
6429         struct tg3 *tp = netdev_priv(dev);
6430         int ret;
6431         u8  *pd;
6432         u32 i, offset, len, val, b_offset, b_count;
6433
6434         offset = eeprom->offset;
6435         len = eeprom->len;
6436         eeprom->len = 0;
6437
6438         eeprom->magic = TG3_EEPROM_MAGIC;
6439
6440         if (offset & 3) {
6441                 /* adjustments to start on required 4 byte boundary */
6442                 b_offset = offset & 3;
6443                 b_count = 4 - b_offset;
6444                 if (b_count > len) {
6445                         /* i.e. offset=1 len=2 */
6446                         b_count = len;
6447                 }
6448                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6449                 if (ret)
6450                         return ret;
6451                 val = cpu_to_le32(val);
6452                 memcpy(data, ((char*)&val) + b_offset, b_count);
6453                 len -= b_count;
6454                 offset += b_count;
6455                 eeprom->len += b_count;
6456         }
6457
6458         /* read bytes up to the last 4 byte boundary */
6459         pd = &data[eeprom->len];
6460         for (i = 0; i < (len - (len & 3)); i += 4) {
6461                 ret = tg3_nvram_read(tp, offset + i, &val);
6462                 if (ret) {
6463                         eeprom->len += i;
6464                         return ret;
6465                 }
6466                 val = cpu_to_le32(val);
6467                 memcpy(pd + i, &val, 4);
6468         }
6469         eeprom->len += i;
6470
6471         if (len & 3) {
6472                 /* read last bytes not ending on 4 byte boundary */
6473                 pd = &data[eeprom->len];
6474                 b_count = len & 3;
6475                 b_offset = offset + len - b_count;
6476                 ret = tg3_nvram_read(tp, b_offset, &val);
6477                 if (ret)
6478                         return ret;
6479                 val = cpu_to_le32(val);
6480                 memcpy(pd, ((char*)&val), b_count);
6481                 eeprom->len += b_count;
6482         }
6483         return 0;
6484 }
6485
6486 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6487
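     /* ethtool EEPROM write.  Unaligned requests become an aligned
      * read-modify-write: the bordering words are read into a temporary
      * buffer wrapped around the caller's data before the block write.
      */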
6488 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6489 {
6490         struct tg3 *tp = netdev_priv(dev);
6491         int ret;
6492         u32 offset, len, b_offset, odd_len, start, end;
6493         u8 *buf;
6494
6495         if (eeprom->magic != TG3_EEPROM_MAGIC)
6496                 return -EINVAL;
6497
6498         offset = eeprom->offset;
6499         len = eeprom->len;
6500
6501         if ((b_offset = (offset & 3))) {
6502                 /* adjustments to start on required 4 byte boundary */
6503                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6504                 if (ret)
6505                         return ret;
6506                 start = cpu_to_le32(start);
6507                 len += b_offset;
6508                 offset &= ~3;
6509         }
6510
6511         odd_len = 0;
6512         if ((len & 3) && ((len > 4) || (b_offset == 0))) {
6513                 /* adjustments to end on required 4 byte boundary */
6514                 odd_len = 1;
6515                 len = (len + 3) & ~3;
6516                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6517                 if (ret)
6518                         return ret;
6519                 end = cpu_to_le32(end);
6520         }
6521
6522         buf = data;
6523         if (b_offset || odd_len) {
6524                 buf = kmalloc(len, GFP_KERNEL);
6525                 if (buf == NULL)
6526                         return -ENOMEM;
6527                 if (b_offset)
6528                         memcpy(buf, &start, 4);
6529                 if (odd_len)
6530                         memcpy(buf+len-4, &end, 4);
6531                 memcpy(buf + b_offset, data, eeprom->len);
6532         }
6533
6534         ret = tg3_nvram_write_block(tp, offset, len, buf);
6535
6536         if (buf != data)
6537                 kfree(buf);
6538
6539         return ret;
6540 }
6541
6542 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6543 {
6544         struct tg3 *tp = netdev_priv(dev);
6545   
6546         cmd->supported = (SUPPORTED_Autoneg);
6547
6548         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6549                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6550                                    SUPPORTED_1000baseT_Full);
6551
6552         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6553                 cmd->supported |= (SUPPORTED_100baseT_Half |
6554                                   SUPPORTED_100baseT_Full |
6555                                   SUPPORTED_10baseT_Half |
6556                                   SUPPORTED_10baseT_Full |
6557                                   SUPPORTED_MII);
6558         else
6559                 cmd->supported |= SUPPORTED_FIBRE;
6560   
6561         cmd->advertising = tp->link_config.advertising;
6562         if (netif_running(dev)) {
6563                 cmd->speed = tp->link_config.active_speed;
6564                 cmd->duplex = tp->link_config.active_duplex;
6565         }
6566         cmd->port = 0;
6567         cmd->phy_address = PHY_ADDR;
6568         cmd->transceiver = 0;
6569         cmd->autoneg = tp->link_config.autoneg;
6570         cmd->maxtxpkt = 0;
6571         cmd->maxrxpkt = 0;
6572         return 0;
6573 }
6574   
6575 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6576 {
6577         struct tg3 *tp = netdev_priv(dev);
6578   
6579         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6580                 /* These are the only valid advertisement bits allowed.  */
6581                 if (cmd->autoneg == AUTONEG_ENABLE &&
6582                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6583                                           ADVERTISED_1000baseT_Full |
6584                                           ADVERTISED_Autoneg |
6585                                           ADVERTISED_FIBRE)))
6586                         return -EINVAL;
6587         }
6588
6589         spin_lock_irq(&tp->lock);
6590         spin_lock(&tp->tx_lock);
6591
6592         tp->link_config.autoneg = cmd->autoneg;
6593         if (cmd->autoneg == AUTONEG_ENABLE) {
6594                 tp->link_config.advertising = cmd->advertising;
6595                 tp->link_config.speed = SPEED_INVALID;
6596                 tp->link_config.duplex = DUPLEX_INVALID;
6597         } else {
6598                 tp->link_config.advertising = 0;
6599                 tp->link_config.speed = cmd->speed;
6600                 tp->link_config.duplex = cmd->duplex;
6601         }
6602   
6603         if (netif_running(dev))
6604                 tg3_setup_phy(tp, 1);
6605
6606         spin_unlock(&tp->tx_lock);
6607         spin_unlock_irq(&tp->lock);
6608   
6609         return 0;
6610 }
6611   
6612 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6613 {
6614         struct tg3 *tp = netdev_priv(dev);
6615   
6616         strcpy(info->driver, DRV_MODULE_NAME);
6617         strcpy(info->version, DRV_MODULE_VERSION);
6618         strcpy(info->bus_info, pci_name(tp->pdev));
6619 }
6620   
6621 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6622 {
6623         struct tg3 *tp = netdev_priv(dev);
6624   
6625         wol->supported = WAKE_MAGIC;
6626         wol->wolopts = 0;
6627         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6628                 wol->wolopts = WAKE_MAGIC;
6629         memset(&wol->sopass, 0, sizeof(wol->sopass));
6630 }
6631   
6632 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6633 {
6634         struct tg3 *tp = netdev_priv(dev);
6635   
6636         if (wol->wolopts & ~WAKE_MAGIC)
6637                 return -EINVAL;
6638         if ((wol->wolopts & WAKE_MAGIC) &&
6639             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6640             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6641                 return -EINVAL;
6642   
6643         spin_lock_irq(&tp->lock);
6644         if (wol->wolopts & WAKE_MAGIC)
6645                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6646         else
6647                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6648         spin_unlock_irq(&tp->lock);
6649   
6650         return 0;
6651 }
6652   
6653 static u32 tg3_get_msglevel(struct net_device *dev)
6654 {
6655         struct tg3 *tp = netdev_priv(dev);
6656         return tp->msg_enable;
6657 }
6658   
6659 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6660 {
6661         struct tg3 *tp = netdev_priv(dev);
6662         tp->msg_enable = value;
6663 }
6664   
6665 #if TG3_TSO_SUPPORT != 0
6666 static int tg3_set_tso(struct net_device *dev, u32 value)
6667 {
6668         struct tg3 *tp = netdev_priv(dev);
6669
6670         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6671                 if (value)
6672                         return -EINVAL;
6673                 return 0;
6674         }
6675         return ethtool_op_set_tso(dev, value);
6676 }
6677 #endif
6678   
6679 static int tg3_nway_reset(struct net_device *dev)
6680 {
6681         struct tg3 *tp = netdev_priv(dev);
6682         u32 bmcr;
6683         int r;
6684   
6685         if (!netif_running(dev))
6686                 return -EAGAIN;
6687
6688         spin_lock_irq(&tp->lock);
6689         r = -EINVAL;
6690         tg3_readphy(tp, MII_BMCR, &bmcr);
6691         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6692             (bmcr & BMCR_ANENABLE)) {
6693                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6694                 r = 0;
6695         }
6696         spin_unlock_irq(&tp->lock);
6697   
6698         return r;
6699 }
6700   
6701 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6702 {
6703         struct tg3 *tp = netdev_priv(dev);
6704   
6705         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6706         ering->rx_mini_max_pending = 0;
6707         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6708
6709         ering->rx_pending = tp->rx_pending;
6710         ering->rx_mini_pending = 0;
6711         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6712         ering->tx_pending = tp->tx_pending;
6713 }
6714   
6715 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6716 {
6717         struct tg3 *tp = netdev_priv(dev);
6718   
6719         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6720             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6721             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6722                 return -EINVAL;
6723   
6724         if (netif_running(dev))
6725                 tg3_netif_stop(tp);
6726
6727         spin_lock_irq(&tp->lock);
6728         spin_lock(&tp->tx_lock);
6729   
6730         tp->rx_pending = ering->rx_pending;
6731
6732         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6733             tp->rx_pending > 63)
6734                 tp->rx_pending = 63;
6735         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6736         tp->tx_pending = ering->tx_pending;
6737
6738         if (netif_running(dev)) {
6739                 tg3_halt(tp);
6740                 tg3_init_hw(tp);
6741                 tg3_netif_start(tp);
6742         }
6743
6744         spin_unlock(&tp->tx_lock);
6745         spin_unlock_irq(&tp->lock);
6746   
6747         return 0;
6748 }
6749   
6750 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6751 {
6752         struct tg3 *tp = netdev_priv(dev);
6753   
6754         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6755         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6756         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6757 }
6758   
6759 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6760 {
6761         struct tg3 *tp = netdev_priv(dev);
6762   
6763         if (netif_running(dev))
6764                 tg3_netif_stop(tp);
6765
6766         spin_lock_irq(&tp->lock);
6767         spin_lock(&tp->tx_lock);
6768         if (epause->autoneg)
6769                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6770         else
6771                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6772         if (epause->rx_pause)
6773                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6774         else
6775                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6776         if (epause->tx_pause)
6777                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6778         else
6779                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6780
6781         if (netif_running(dev)) {
6782                 tg3_halt(tp);
6783                 tg3_init_hw(tp);
6784                 tg3_netif_start(tp);
6785         }
6786         spin_unlock(&tp->tx_lock);
6787         spin_unlock_irq(&tp->lock);
6788   
6789         return 0;
6790 }
6791   
6792 static u32 tg3_get_rx_csum(struct net_device *dev)
6793 {
6794         struct tg3 *tp = netdev_priv(dev);
6795         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6796 }
6797   
6798 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6799 {
6800         struct tg3 *tp = netdev_priv(dev);
6801   
6802         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6803                 if (data != 0)
6804                         return -EINVAL;
6805                 return 0;
6806         }
6807   
6808         spin_lock_irq(&tp->lock);
6809         if (data)
6810                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6811         else
6812                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6813         spin_unlock_irq(&tp->lock);
6814   
6815         return 0;
6816 }
6817   
6818 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6819 {
6820         struct tg3 *tp = netdev_priv(dev);
6821   
6822         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6823                 if (data != 0)
6824                         return -EINVAL;
6825                 return 0;
6826         }
6827   
6828         if (data)
6829                 dev->features |= NETIF_F_IP_CSUM;
6830         else
6831                 dev->features &= ~NETIF_F_IP_CSUM;
6832
6833         return 0;
6834 }
6835
6836 static int tg3_get_stats_count (struct net_device *dev)
6837 {
6838         return TG3_NUM_STATS;
6839 }
6840
6841 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6842 {
6843         switch (stringset) {
6844         case ETH_SS_STATS:
6845                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6846                 break;
6847         default:
6848                 WARN_ON(1);     /* we need a WARN() */
6849                 break;
6850         }
6851 }
6852
6853 static void tg3_get_ethtool_stats (struct net_device *dev,
6854                                    struct ethtool_stats *estats, u64 *tmp_stats)
6855 {
6856         struct tg3 *tp = netdev_priv(dev);
6857         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6858 }
6859
6860 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6861 {
6862         struct mii_ioctl_data *data = if_mii(ifr);
6863         struct tg3 *tp = netdev_priv(dev);
6864         int err;
6865
6866         switch(cmd) {
6867         case SIOCGMIIPHY:
6868                 data->phy_id = PHY_ADDR;
6869
6870                 /* fallthru */
6871         case SIOCGMIIREG: {
6872                 u32 mii_regval;
6873
6874                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6875                         break;                  /* We have no PHY */
6876
6877                 spin_lock_irq(&tp->lock);
6878                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6879                 spin_unlock_irq(&tp->lock);
6880
6881                 data->val_out = mii_regval;
6882
6883                 return err;
6884         }
6885
6886         case SIOCSMIIREG:
6887                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6888                         break;                  /* We have no PHY */
6889
6890                 if (!capable(CAP_NET_ADMIN))
6891                         return -EPERM;
6892
6893                 spin_lock_irq(&tp->lock);
6894                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6895                 spin_unlock_irq(&tp->lock);
6896
6897                 return err;
6898
6899         default:
6900                 /* do nothing */
6901                 break;
6902         }
6903         return -EOPNOTSUPP;
6904 }
6905
6906 #if TG3_VLAN_TAG_USED
6907 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6908 {
6909         struct tg3 *tp = netdev_priv(dev);
6910
6911         spin_lock_irq(&tp->lock);
6912         spin_lock(&tp->tx_lock);
6913
6914         tp->vlgrp = grp;
6915
6916         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6917         __tg3_set_rx_mode(dev);
6918
6919         spin_unlock(&tp->tx_lock);
6920         spin_unlock_irq(&tp->lock);
6921 }
6922
6923 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6924 {
6925         struct tg3 *tp = netdev_priv(dev);
6926
6927         spin_lock_irq(&tp->lock);
6928         spin_lock(&tp->tx_lock);
6929         if (tp->vlgrp)
6930                 tp->vlgrp->vlan_devices[vid] = NULL;
6931         spin_unlock(&tp->tx_lock);
6932         spin_unlock_irq(&tp->lock);
6933 }
6934 #endif
6935
6936 static struct ethtool_ops tg3_ethtool_ops = {
6937         .get_settings           = tg3_get_settings,
6938         .set_settings           = tg3_set_settings,
6939         .get_drvinfo            = tg3_get_drvinfo,
6940         .get_regs_len           = tg3_get_regs_len,
6941         .get_regs               = tg3_get_regs,
6942         .get_wol                = tg3_get_wol,
6943         .set_wol                = tg3_set_wol,
6944         .get_msglevel           = tg3_get_msglevel,
6945         .set_msglevel           = tg3_set_msglevel,
6946         .nway_reset             = tg3_nway_reset,
6947         .get_link               = ethtool_op_get_link,
6948         .get_eeprom_len         = tg3_get_eeprom_len,
6949         .get_eeprom             = tg3_get_eeprom,
6950         .set_eeprom             = tg3_set_eeprom,
6951         .get_ringparam          = tg3_get_ringparam,
6952         .set_ringparam          = tg3_set_ringparam,
6953         .get_pauseparam         = tg3_get_pauseparam,
6954         .set_pauseparam         = tg3_set_pauseparam,
6955         .get_rx_csum            = tg3_get_rx_csum,
6956         .set_rx_csum            = tg3_set_rx_csum,
6957         .get_tx_csum            = ethtool_op_get_tx_csum,
6958         .set_tx_csum            = tg3_set_tx_csum,
6959         .get_sg                 = ethtool_op_get_sg,
6960         .set_sg                 = ethtool_op_set_sg,
6961 #if TG3_TSO_SUPPORT != 0
6962         .get_tso                = ethtool_op_get_tso,
6963         .set_tso                = tg3_set_tso,
6964 #endif
6965         .get_strings            = tg3_get_strings,
6966         .get_stats_count        = tg3_get_stats_count,
6967         .get_ethtool_stats      = tg3_get_ethtool_stats,
6968 };
6969
6970 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
6971 {
6972         u32 cursize, val;
6973
6974         tp->nvram_size = EEPROM_CHIP_SIZE;
6975
6976         if (tg3_nvram_read(tp, 0, &val) != 0)
6977                 return;
6978
6979         if (swab32(val) != TG3_EEPROM_MAGIC)
6980                 return;
6981
6982         /*
6983          * Size the chip by reading offsets at increasing powers of two.
6984          * When we encounter our validation signature, we know the addressing
6985          * has wrapped around, and thus have our chip size.
6986          */
6987         cursize = 0x800;
6988
6989         while (cursize < tp->nvram_size) {
6990                 if (tg3_nvram_read(tp, cursize, &val) != 0)
6991                         return;
6992
6993                 if (swab32(val) == TG3_EEPROM_MAGIC)
6994                         break;
6995
6996                 cursize <<= 1;
6997         }
6998
6999         tp->nvram_size = cursize;
7000 }
7001                 
7002 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7003 {
7004         u32 val;
7005
7006         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7007                 if (val != 0) {
7008                         tp->nvram_size = (val >> 16) * 1024;
7009                         return;
7010                 }
7011         }
7012         tp->nvram_size = 0x20000;
7013 }
7014
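     /* Decode the NVRAM_CFG1 strapping to identify the attached flash or
      * EEPROM part (JEDEC vendor, page size, buffered vs. unbuffered).
      */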
7015 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7016 {
7017         u32 nvcfg1;
7018
7019         nvcfg1 = tr32(NVRAM_CFG1);
7020         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7021                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7022         }
7023         else {
7024                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7025                 tw32(NVRAM_CFG1, nvcfg1);
7026         }
7027
7028         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7029                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7030                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7031                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7032                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7033                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7034                                 break;
7035                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7036                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7037                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7038                                 break;
7039                         case FLASH_VENDOR_ATMEL_EEPROM:
7040                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7041                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7042                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7043                                 break;
7044                         case FLASH_VENDOR_ST:
7045                                 tp->nvram_jedecnum = JEDEC_ST;
7046                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7047                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7048                                 break;
7049                         case FLASH_VENDOR_SAIFUN:
7050                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7051                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7052                                 break;
7053                         case FLASH_VENDOR_SST_SMALL:
7054                         case FLASH_VENDOR_SST_LARGE:
7055                                 tp->nvram_jedecnum = JEDEC_SST;
7056                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7057                                 break;
7058                 }
7059         }
7060         else {
7061                 tp->nvram_jedecnum = JEDEC_ATMEL;
7062                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7063                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7064         }
7065 }
7066
7067 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7068 static void __devinit tg3_nvram_init(struct tg3 *tp)
7069 {
7070         int j;
7071
7072         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7073                 return;
7074
7075         tw32_f(GRC_EEPROM_ADDR,
7076              (EEPROM_ADDR_FSM_RESET |
7077               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7078                EEPROM_ADDR_CLKPERD_SHIFT)));
7079
7080         /* XXX schedule_timeout() ... */
7081         for (j = 0; j < 100; j++)
7082                 udelay(10);
7083
7084         /* Enable seeprom accesses. */
7085         tw32_f(GRC_LOCAL_CTRL,
7086              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7087         udelay(100);
7088
7089         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7090             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7091                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7092
7093                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7094                         u32 nvaccess = tr32(NVRAM_ACCESS);
7095
7096                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7097                 }
7098
7099                 tg3_get_nvram_info(tp);
7100                 tg3_get_nvram_size(tp);
7101
7102                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7103                         u32 nvaccess = tr32(NVRAM_ACCESS);
7104
7105                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7106                 }
7107
7108         } else {
7109                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7110
7111                 tg3_get_eeprom_size(tp);
7112         }
7113 }
7114
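     /* Read path used when TG3_FLAG_NVRAM is not set: program the GRC
      * EEPROM state machine with the word address, poll for
      * EEPROM_ADDR_COMPLETE, then latch the word from GRC_EEPROM_DATA.
      */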
7115 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7116                                         u32 offset, u32 *val)
7117 {
7118         u32 tmp;
7119         int i;
7120
7121         if (offset > EEPROM_ADDR_ADDR_MASK ||
7122             (offset % 4) != 0)
7123                 return -EINVAL;
7124
7125         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7126                                         EEPROM_ADDR_DEVID_MASK |
7127                                         EEPROM_ADDR_READ);
7128         tw32(GRC_EEPROM_ADDR,
7129              tmp |
7130              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7131              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7132               EEPROM_ADDR_ADDR_MASK) |
7133              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7134
7135         for (i = 0; i < 10000; i++) {
7136                 tmp = tr32(GRC_EEPROM_ADDR);
7137
7138                 if (tmp & EEPROM_ADDR_COMPLETE)
7139                         break;
7140                 udelay(100);
7141         }
7142         if (!(tmp & EEPROM_ADDR_COMPLETE))
7143                 return -EBUSY;
7144
7145         *val = tr32(GRC_EEPROM_DATA);
7146         return 0;
7147 }
7148
7149 #define NVRAM_CMD_TIMEOUT 10000
7150
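     /* Issue an NVRAM command and poll for NVRAM_CMD_DONE, allowing the
      * controller up to NVRAM_CMD_TIMEOUT * 10 usec to finish.
      */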
7151 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7152 {
7153         int i;
7154
7155         tw32(NVRAM_CMD, nvram_cmd);
7156         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7157                 udelay(10);
7158                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7159                         udelay(10);
7160                         break;
7161                 }
7162         }
7163         if (i == NVRAM_CMD_TIMEOUT) {
7164                 return -EBUSY;
7165         }
7166         return 0;
7167 }
7168
7169 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7170 {
7171         int ret;
7172
7173         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7174                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7175                 return -EINVAL;
7176         }
7177
7178         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7179                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7180
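             /* Atmel AT45DB-style buffered flash is addressed as
              * (page << ATMEL_AT45DB0X1B_PAGE_POS) | byte-within-page,
              * so translate the linear byte offset accordingly.
              */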
7181         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7182                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7183                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7184
7185                 offset = ((offset / tp->nvram_pagesize) <<
7186                           ATMEL_AT45DB0X1B_PAGE_POS) +
7187                         (offset % tp->nvram_pagesize);
7188         }
7189
7190         if (offset > NVRAM_ADDR_MSK)
7191                 return -EINVAL;
7192
7193         tg3_nvram_lock(tp);
7194
7195         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7196                 u32 nvaccess = tr32(NVRAM_ACCESS);
7197
7198                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7199         }
7200
7201         tw32(NVRAM_ADDR, offset);
7202         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7203                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7204
7205         if (ret == 0)
7206                 *val = swab32(tr32(NVRAM_RDDATA));
7207
7208         tg3_nvram_unlock(tp);
7209
7210         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7211                 u32 nvaccess = tr32(NVRAM_ACCESS);
7212
7213                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7214         }
7215
7216         return ret;
7217 }
7218
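     /* Word-at-a-time write through the GRC EEPROM state machine, polling
      * EEPROM_ADDR_COMPLETE after each word.
      */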
7219 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7220                                     u32 offset, u32 len, u8 *buf)
7221 {
7222         int i, j, rc = 0;
7223         u32 val;
7224
7225         for (i = 0; i < len; i += 4) {
7226                 u32 addr, data;
7227
7228                 addr = offset + i;
7229
7230                 memcpy(&data, buf + i, 4);
7231
7232                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7233
7234                 val = tr32(GRC_EEPROM_ADDR);
7235                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7236
7237                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7238                         EEPROM_ADDR_READ);
7239                 tw32(GRC_EEPROM_ADDR, val |
7240                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7241                         (addr & EEPROM_ADDR_ADDR_MASK) |
7242                         EEPROM_ADDR_START |
7243                         EEPROM_ADDR_WRITE);
7244                 
7245                 for (j = 0; j < 10000; j++) {
7246                         val = tr32(GRC_EEPROM_ADDR);
7247
7248                         if (val & EEPROM_ADDR_COMPLETE)
7249                                 break;
7250                         udelay(100);
7251                 }
7252                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7253                         rc = -EBUSY;
7254                         break;
7255                 }
7256         }
7257
7258         return rc;
7259 }
7260
7261 /* offset and length are dword aligned */
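     /* Unbuffered flash can only be programmed a full page at a time, so each
      * affected page is read into a bounce buffer, patched with the new data,
      * erased, and then rewritten word by word.
      */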
7262 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7263                 u8 *buf)
7264 {
7265         int ret = 0;
7266         u32 pagesize = tp->nvram_pagesize;
7267         u32 pagemask = pagesize - 1;
7268         u32 nvram_cmd;
7269         u8 *tmp;
7270
7271         tmp = kmalloc(pagesize, GFP_KERNEL);
7272         if (tmp == NULL)
7273                 return -ENOMEM;
7274
7275         while (len) {
7276                 int j;
7277                 u32 phy_addr, page_off, size, nvaccess;
7278
7279                 phy_addr = offset & ~pagemask;
7280         
7281                 for (j = 0; j < pagesize; j += 4) {
7282                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7283                                                 (u32 *) (tmp + j))))
7284                                 break;
7285                 }
7286                 if (ret)
7287                         break;
7288
7289                 page_off = offset & pagemask;
7290                 size = pagesize;
7291                 if (len < size)
7292                         size = len;
7293
7294                 len -= size;
7295
7296                 memcpy(tmp + page_off, buf, size);
7297
7298                 offset = offset + (pagesize - page_off);
7299
7300                 nvaccess = tr32(NVRAM_ACCESS);
7301                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7302
7303                 /*
7304                  * Before we can erase the flash page, we need
7305                  * to issue a special "write enable" command.
7306                  */
7307                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7308
7309                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7310                         break;
7311
7312                 /* Erase the target page */
7313                 tw32(NVRAM_ADDR, phy_addr);
7314
7315                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7316                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7317
7318                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7319                         break;
7320
7321                 /* Issue another write enable to start the write. */
7322                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7323
7324                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7325                         break;
7326
7327                 for (j = 0; j < pagesize; j += 4) {
7328                         u32 data;
7329
7330                         data = *((u32 *) (tmp + j));
7331                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7332
7333                         tw32(NVRAM_ADDR, phy_addr + j);
7334
7335                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7336                                 NVRAM_CMD_WR;
7337
7338                         if (j == 0)
7339                                 nvram_cmd |= NVRAM_CMD_FIRST;
7340                         else if (j == (pagesize - 4))
7341                                 nvram_cmd |= NVRAM_CMD_LAST;
7342
7343                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7344                                 break;
7345                 }
7346                 if (ret)
7347                         break;
7348         }
7349
7350         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7351         tg3_nvram_exec_cmd(tp, nvram_cmd);
7352
7353         kfree(tmp);
7354
7355         return ret;
7356 }
7357
7358 /* offset and length are dword aligned */
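     /* Buffered flash and EEPROM path: stream the words out directly, tagging
      * the first and last word of each flash page with NVRAM_CMD_FIRST/LAST
      * and issuing a write-enable for ST parts at the start of each page.
      */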
7359 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7360                 u8 *buf)
7361 {
7362         int i, ret = 0;
7363
7364         for (i = 0; i < len; i += 4, offset += 4) {
7365                 u32 data, page_off, phy_addr, nvram_cmd;
7366
7367                 memcpy(&data, buf + i, 4);
7368                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7369
7370                 page_off = offset % tp->nvram_pagesize;
7371
7372                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7373                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7374
7375                         phy_addr = ((offset / tp->nvram_pagesize) <<
7376                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7377                 }
7378                 else {
7379                         phy_addr = offset;
7380                 }
7381
7382                 tw32(NVRAM_ADDR, phy_addr);
7383
7384                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7385
7386                 if ((page_off == 0) || (i == 0))
7387                         nvram_cmd |= NVRAM_CMD_FIRST;
7388                 else if (page_off == (tp->nvram_pagesize - 4))
7389                         nvram_cmd |= NVRAM_CMD_LAST;
7390
7391                 if (i == (len - 4))
7392                         nvram_cmd |= NVRAM_CMD_LAST;
7393
7394                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7395                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7396
7397                         if ((ret = tg3_nvram_exec_cmd(tp,
7398                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7399                                 NVRAM_CMD_DONE)))
7401                                 break;
7402                 }
7403                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7404                         /* We always do complete word writes to eeprom. */
7405                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7406                 }
7407
7408                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7409                         break;
7410         }
7411         return ret;
7412 }
7413
7414 /* offset and length are dword aligned */
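     /* Top-level NVRAM write: deassert the write-protect GPIO while writing
      * (when TG3_FLAG_EEPROM_WRITE_PROT is set), enable NVRAM writes in
      * GRC_MODE, and dispatch to the eeprom, buffered or unbuffered helper.
      */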
7415 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7416 {
7417         int ret;
7418
7419         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7420                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7421                 return -EINVAL;
7422         }
7423
7424         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7425                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7426                        GRC_LCLCTRL_GPIO_OE1);
7427                 udelay(40);
7428         }
7429
7430         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7431                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7432         }
7433         else {
7434                 u32 grc_mode;
7435
7436                 tg3_nvram_lock(tp);
7437
7438                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7439                         u32 nvaccess = tr32(NVRAM_ACCESS);
7440
7441                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7442
7443                         tw32(NVRAM_WRITE1, 0x406);
7444                 }
7445
7446                 grc_mode = tr32(GRC_MODE);
7447                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7448
7449                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7450                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7451
7452                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7453                                 buf);
7454                 }
7455                 else {
7456                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7457                                 buf);
7458                 }
7459
7460                 grc_mode = tr32(GRC_MODE);
7461                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7462
7463                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7464                         u32 nvaccess = tr32(NVRAM_ACCESS);
7465
7466                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7467                 }
7468                 tg3_nvram_unlock(tp);
7469         }
7470
7471         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7472                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7473                        GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1);
7474                 udelay(40);
7475         }
7476
7477         return ret;
7478 }
7479
7480 struct subsys_tbl_ent {
7481         u16 subsys_vendor, subsys_devid;
7482         u32 phy_id;
7483 };
7484
7485 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7486         /* Broadcom boards. */
7487         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7488         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7489         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7490         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7491         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7492         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7493         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7494         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7495         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7496         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7497         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7498
7499         /* 3com boards. */
7500         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7501         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7502         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7503         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7504         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7505
7506         /* DELL boards. */
7507         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7508         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7509         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7510         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7511
7512         /* Compaq boards. */
7513         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7514         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7515         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7516         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7517         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7518
7519         /* IBM boards. */
7520         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7521 };
7522
7523 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7524 {
7525         int i;
7526
7527         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7528                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7529                      tp->pdev->subsystem_vendor) &&
7530                     (subsys_id_to_phy_id[i].subsys_devid ==
7531                      tp->pdev->subsystem_device))
7532                         return &subsys_id_to_phy_id[i];
7533         }
7534         return NULL;
7535 }
7536
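     /* Work out which PHY is attached: decode the NIC SRAM config block
      * (LED mode, ASF, WOL and serdes hints), then read the MII PHY ID.
      * If ASF owns the PHY or the ID looks bogus, fall back to the ID from
      * the config block or the subsystem-ID table above.
      */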
7537 static int __devinit tg3_phy_probe(struct tg3 *tp)
7538 {
7539         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
7540         u32 hw_phy_id, hw_phy_id_masked;
7541         u32 val;
7542         int eeprom_signature_found, eeprom_phy_serdes, err;
7543
7544         tp->phy_id = PHY_ID_INVALID;
7545         eeprom_phy_id = PHY_ID_INVALID;
7546         eeprom_phy_serdes = 0;
7547         eeprom_signature_found = 0;
7548         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7549         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7550                 u32 nic_cfg, led_cfg;
7551                 u32 nic_phy_id, ver, cfg2 = 0;
7552
7553                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7554                 tp->nic_sram_data_cfg = nic_cfg;
7555
7556                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7557                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7558                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7559                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7560                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7561                     (ver > 0) && (ver < 0x100))
7562                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7563
7564                 eeprom_signature_found = 1;
7565
7566                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7567                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7568                         eeprom_phy_serdes = 1;
7569
7570                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7571                 if (nic_phy_id != 0) {
7572                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7573                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7574
7575                         eeprom_phy_id  = (id1 >> 16) << 10;
7576                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7577                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7578                 } else
7579                         eeprom_phy_id = 0;
7580
7581                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7582                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7583                                     SHASTA_EXT_LED_MODE_MASK);
7584                 else
7585                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7586
7587                 switch (led_cfg) {
7588                 default:
7589                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7590                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7591                         break;
7592
7593                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7594                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7595                         break;
7596
7597                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7598                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7599                         break;
7600
7601                 case SHASTA_EXT_LED_SHARED:
7602                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7603                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7604                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7605                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7606                                                  LED_CTRL_MODE_PHY_2);
7607                         break;
7608
7609                 case SHASTA_EXT_LED_MAC:
7610                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7611                         break;
7612
7613                 case SHASTA_EXT_LED_COMBO:
7614                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7615                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7616                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7617                                                  LED_CTRL_MODE_PHY_2);
7618                         break;
7619
7620                 }
7621
7622                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7623                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7624                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7625                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7626
7627                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7628                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7629                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7630                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7631
7632                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7633                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7634                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7635                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7636                 }
7637                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7638                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7639
7640                 if (cfg2 & (1 << 17))
7641                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7642
7643                 /* serdes signal pre-emphasis in register 0x590 set by */
7644                 /* bootcode if bit 18 is set */
7645                 if (cfg2 & (1 << 18))
7646                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7647         }
7648
7649         /* Reading the PHY ID register can conflict with ASF
7650          * firmware access to the PHY hardware.
7651          */
7652         err = 0;
7653         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7654                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7655         } else {
7656                 /* Now read the physical PHY_ID from the chip and verify
7657                  * that it is sane.  If it doesn't look good, we fall back
7658                  * to either the hard-coded table based PHY_ID and failing
7659                  * that the value found in the eeprom area.
7660                  */
7661                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7662                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7663
7664                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7665                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7666                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7667
7668                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7669         }
7670
7671         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7672                 tp->phy_id = hw_phy_id;
7673                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7674                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7675         } else {
7676                 if (eeprom_signature_found) {
7677                         tp->phy_id = eeprom_phy_id;
7678                         if (eeprom_phy_serdes)
7679                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7680                 } else {
7681                         struct subsys_tbl_ent *p;
7682
7683                         /* No eeprom signature?  Try the hardcoded
7684                          * subsys device table.
7685                          */
7686                         p = lookup_by_subsys(tp);
7687                         if (!p)
7688                                 return -ENODEV;
7689
7690                         tp->phy_id = p->phy_id;
7691                         if (!tp->phy_id ||
7692                             tp->phy_id == PHY_ID_BCM8002)
7693                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7694                 }
7695         }
7696
7697         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7698             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7699                 u32 bmsr, adv_reg, tg3_ctrl;
7700
7701                 tg3_readphy(tp, MII_BMSR, &bmsr);
7702                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
7703                     (bmsr & BMSR_LSTATUS))
7704                         goto skip_phy_reset;
7705                     
7706                 err = tg3_phy_reset(tp);
7707                 if (err)
7708                         return err;
7709
7710                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7711                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7712                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7713                 tg3_ctrl = 0;
7714                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7715                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7716                                     MII_TG3_CTRL_ADV_1000_FULL);
7717                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7718                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7719                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7720                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7721                 }
7722
7723                 if (!tg3_copper_is_advertising_all(tp)) {
7724                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7725
7726                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7727                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7728
7729                         tg3_writephy(tp, MII_BMCR,
7730                                      BMCR_ANENABLE | BMCR_ANRESTART);
7731                 }
7732                 tg3_phy_set_wirespeed(tp);
7733
7734                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7735                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7736                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7737         }
7738
7739 skip_phy_reset:
7740         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7741                 err = tg3_init_5401phy_dsp(tp);
7742                 if (err)
7743                         return err;
7744         }
7745
7746         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7747                 err = tg3_init_5401phy_dsp(tp);
7748         }
7749
7750         if (!eeprom_signature_found)
7751                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7752
7753         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7754                 tp->link_config.advertising =
7755                         (ADVERTISED_1000baseT_Half |
7756                          ADVERTISED_1000baseT_Full |
7757                          ADVERTISED_Autoneg |
7758                          ADVERTISED_FIBRE);
7759         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7760                 tp->link_config.advertising &=
7761                         ~(ADVERTISED_1000baseT_Half |
7762                           ADVERTISED_1000baseT_Full);
7763
7764         return err;
7765 }
7766
7767 static void __devinit tg3_read_partno(struct tg3 *tp)
7768 {
7769         unsigned char vpd_data[256];
7770         int i;
7771
7772         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7773                 /* Sun decided not to put the necessary bits in the
7774                  * NVRAM of their onboard tg3 parts :(
7775                  */
7776                 strcpy(tp->board_part_number, "Sun 570X");
7777                 return;
7778         }
7779
7780         for (i = 0; i < 256; i += 4) {
7781                 u32 tmp;
7782
7783                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7784                         goto out_not_found;
7785
7786                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7787                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7788                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7789                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7790         }
7791
7792         /* Now parse and find the part number. */
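             /* The VPD image is a chain of resource tags: 0x82 (identifier
              * string) and 0x91 (read/write area) are skipped, and the "PN"
              * keyword is looked up inside the 0x90 read-only area.
              */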
7793         for (i = 0; i < 256; ) {
7794                 unsigned char val = vpd_data[i];
7795                 int block_end;
7796
7797                 if (val == 0x82 || val == 0x91) {
7798                         i = (i + 3 +
7799                              (vpd_data[i + 1] +
7800                               (vpd_data[i + 2] << 8)));
7801                         continue;
7802                 }
7803
7804                 if (val != 0x90)
7805                         goto out_not_found;
7806
7807                 block_end = (i + 3 +
7808                              (vpd_data[i + 1] +
7809                               (vpd_data[i + 2] << 8)));
7810                 i += 3;
7811                 while (i < block_end) {
7812                         if (vpd_data[i + 0] == 'P' &&
7813                             vpd_data[i + 1] == 'N') {
7814                                 int partno_len = vpd_data[i + 2];
7815
7816                                 if (partno_len > 24)
7817                                         goto out_not_found;
7818
7819                                 memcpy(tp->board_part_number,
7820                                        &vpd_data[i + 3],
7821                                        partno_len);
7822
7823                                 /* Success. */
7824                                 return;
7825                         }
                                     /* Advance past this keyword: 2-byte name, 1-byte length, then data. */
                                     i += 3 + vpd_data[i + 2];
7826                 }
7827
7828                 /* Part number not found. */
7829                 goto out_not_found;
7830         }
7831
7832 out_not_found:
7833         strcpy(tp->board_part_number, "none");
7834 }
7835
7836 #ifdef CONFIG_SPARC64
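     /* Detect Sun's onboard 570X parts by reading the OBP
      * "subsystem-vendor-id" property of the PCI node; these boards do not
      * carry the usual NVRAM contents (see tg3_read_partno above).
      */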
7837 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7838 {
7839         struct pci_dev *pdev = tp->pdev;
7840         struct pcidev_cookie *pcp = pdev->sysdata;
7841
7842         if (pcp != NULL) {
7843                 int node = pcp->prom_node;
7844                 u32 venid;
7845                 int err;
7846
7847                 err = prom_getproperty(node, "subsystem-vendor-id",
7848                                        (char *) &venid, sizeof(venid));
7849                 if (err == 0 || err == -1)
7850                         return 0;
7851                 if (venid == PCI_VENDOR_ID_SUN)
7852                         return 1;
7853         }
7854         return 0;
7855 }
7856 #endif
7857
7858 static int __devinit tg3_get_invariants(struct tg3 *tp)
7859 {
7860         static struct pci_device_id write_reorder_chipsets[] = {
7861                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7862                              PCI_DEVICE_ID_INTEL_82801AA_8) },
7863                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7864                              PCI_DEVICE_ID_INTEL_82801AB_8) },
7865                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7866                              PCI_DEVICE_ID_INTEL_82801BA_11) },
7867                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7868                              PCI_DEVICE_ID_INTEL_82801BA_6) },
7869                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
7870                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
7871                 { },
7872         };
7873         u32 misc_ctrl_reg;
7874         u32 cacheline_sz_reg;
7875         u32 pci_state_reg, grc_misc_cfg;
7876         u32 val;
7877         u16 pci_cmd;
7878         int err;
7879
7880 #ifdef CONFIG_SPARC64
7881         if (tg3_is_sun_570X(tp))
7882                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7883 #endif
7884
7885         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7886          * reordering to the mailbox registers done by the host
7887          * controller can cause major troubles.  We read back from
7888          * every mailbox register write to force the writes to be
7889          * posted to the chip in order.
7890          */
7891         if (pci_dev_present(write_reorder_chipsets))
7892                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7893
7894         /* Force memory write invalidate off.  If we leave it on,
7895          * then on 5700_BX chips we have to enable a workaround.
7896          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7897          * to match the cacheline size.  The Broadcom driver has this
7898          * workaround but turns MWI off all the time, so it never uses
7899          * it.  This seems to suggest that the workaround is insufficient.
7900          */
7901         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7902         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7903         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7904
7905         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7906          * has the register indirect write enable bit set before
7907          * we try to access any of the MMIO registers.  It is also
7908          * critical that the PCI-X hw workaround situation is decided
7909          * before that.
7910          */
7911         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7912                               &misc_ctrl_reg);
7913
7914         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7915                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7916
7917         /* Initialize misc host control in PCI block. */
7918         tp->misc_host_ctrl |= (misc_ctrl_reg &
7919                                MISC_HOST_CTRL_CHIPREV);
7920         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7921                                tp->misc_host_ctrl);
7922
7923         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7924                               &cacheline_sz_reg);
7925
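             /* TG3PCI_CACHELINESZ mirrors PCI config offset 0x0c: cache line
              * size, latency timer, header type and BIST packed into one dword.
              */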
7926         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7927         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7928         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7929         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7930
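             /* Group the chips into generations: 5705_PLUS covers the 5705,
              * 5750 and 5752; 5750_PLUS covers only the 5750 and 5752, which
              * also do TSO in hardware.
              */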
7931         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
7932             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
7933             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752))
7934                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
7935
7936         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7937             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7938                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
7939
7940         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7941                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
7942
7943         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7944                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7945
7946         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7947             tp->pci_lat_timer < 64) {
7948                 tp->pci_lat_timer = 64;
7949
7950                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7951                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7952                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7953                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7954
7955                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7956                                        cacheline_sz_reg);
7957         }
7958
7959         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7960                               &pci_state_reg);
7961
7962         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7963                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7964
7965                 /* If this is a 5700 BX chipset, and we are in PCI-X
7966                  * mode, enable register write workaround.
7967                  *
7968                  * The workaround is to use indirect register accesses
7969                  * for all chip writes not to mailbox registers.
7970                  */
7971                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7972                         u32 pm_reg;
7973                         u16 pci_cmd;
7974
7975                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7976
7977                         /* The chip can have its power management PCI config
7978                          * space registers clobbered due to this bug.
7979                          * So explicitly force the chip into D0 here.
7980                          */
7981                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7982                                               &pm_reg);
7983                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7984                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7985                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7986                                                pm_reg);
7987
7988                         /* Also, force SERR#/PERR# in PCI command. */
7989                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7990                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7991                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7992                 }
7993         }
7994
7995         /* Back-to-back register writes can cause problems on this chip;
7996          * the workaround is to read back all reg writes except those to
7997          * mailbox regs.  See tg3_write_indirect_reg32().
7998          *
7999          * PCI Express 5750_A0 rev chips need this workaround too.
8000          */
8001         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8002             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8003              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8004                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8005
8006         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8007                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8008         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8009                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8010
8011         /* Chip-specific fixup from Broadcom driver */
8012         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8013             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8014                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8015                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8016         }
8017
8018         /* Force the chip into D0. */
8019         err = tg3_set_power_state(tp, 0);
8020         if (err) {
8021                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8022                        pci_name(tp->pdev));
8023                 return err;
8024         }
8025
8026         /* 5700 B0 chips do not support checksumming correctly due
8027          * to hardware bugs.
8028          */
8029         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8030                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8031
8032         /* Pseudo-header checksum is done by hardware logic and not
8033          * the offload processors, so make the chip do the pseudo-
8034          * header checksums on receive.  For transmit it is more
8035          * convenient to do the pseudo-header checksum in software
8036          * as Linux does that on transmit for us in all cases.
8037          */
8038         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8039         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8040
8041         /* Derive initial jumbo mode from MTU assigned in
8042          * ether_setup() via the alloc_etherdev() call
8043          */
8044         if (tp->dev->mtu > ETH_DATA_LEN)
8045                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8046
8047         /* Determine WakeOnLan speed to use. */
8048         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8049             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8050             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8051             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8052                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8053         } else {
8054                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8055         }
8056
8057         /* A few boards don't want Ethernet@WireSpeed phy feature */
8058         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8059             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8060              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8061              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8062                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8063
8064         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8065             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8066                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8067         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8068                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8069
8070         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8071                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8072
8073         /* Only 5701 and later support tagged irq status mode.
8074          * Also, 5788 chips cannot use tagged irq status.
8075          *
8076          * However, since we are using NAPI, avoid tagged irq status
8077          * because the interrupt condition is more difficult to
8078          * fully clear in that mode.
8079          */
8080         tp->coalesce_mode = 0;
8081
8082         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8083             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8084                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8085
8086         /* Initialize MAC MI mode, polling disabled. */
8087         tw32_f(MAC_MI_MODE, tp->mi_mode);
8088         udelay(80);
8089
8090         /* Initialize data/descriptor byte/word swapping. */
8091         val = tr32(GRC_MODE);
8092         val &= GRC_MODE_HOST_STACKUP;
8093         tw32(GRC_MODE, val | tp->grc_mode);
8094
8095         tg3_switch_clocks(tp);
8096
8097         /* Clear this out for sanity. */
8098         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8099
8100         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8101                               &pci_state_reg);
8102         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8103             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8104                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8105
8106                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8107                     chiprevid == CHIPREV_ID_5701_B0 ||
8108                     chiprevid == CHIPREV_ID_5701_B2 ||
8109                     chiprevid == CHIPREV_ID_5701_B5) {
8110                         void __iomem *sram_base;
8111
8112                         /* Write some dummy words into the SRAM status block
8113                          * area, see if it reads back correctly.  If the return
8114                          * value is bad, force enable the PCIX workaround.
8115                          */
8116                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8117
8118                         writel(0x00000000, sram_base);
8119                         writel(0x00000000, sram_base + 4);
8120                         writel(0xffffffff, sram_base + 4);
8121                         if (readl(sram_base) != 0x00000000)
8122                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8123                 }
8124         }
8125
8126         udelay(50);
8127         tg3_nvram_init(tp);
8128
8129         grc_misc_cfg = tr32(GRC_MISC_CFG);
8130         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8131
8132         /* Broadcom's driver says that CIOBE multisplit has a bug */
8133 #if 0
8134         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8135             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8136                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8137                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8138         }
8139 #endif
8140         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8141             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8142              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8143                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8144
8145         /* these are limited to 10/100 only */
8146         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8147              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8148             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8149              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8150              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8151               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8152               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8153             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8154              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8155               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8156                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8157
8158         err = tg3_phy_probe(tp);
8159         if (err) {
8160                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8161                        pci_name(tp->pdev), err);
8162                 /* ... but do not return immediately ... */
8163         }
8164
8165         tg3_read_partno(tp);
8166
8167         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8168                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8169         } else {
8170                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8171                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8172                 else
8173                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8174         }
8175
8176         /* 5700 {AX,BX} chips have a broken status block link
8177          * change bit implementation, so we must use the
8178          * status register in those cases.
8179          */
8180         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8181                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8182         else
8183                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8184
8185         /* The led_ctrl is set during tg3_phy_probe; here we might
8186          * have to force the link status polling mechanism based
8187          * upon subsystem IDs.
8188          */
8189         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8190             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8191                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8192                                   TG3_FLAG_USE_LINKCHG_REG);
8193         }
8194
8195         /* For all SERDES we poll the MAC status register. */
8196         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8197                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8198         else
8199                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8200
8201         /* 5700 BX chips need to have their TX producer index mailboxes
8202          * written twice to work around a bug.
8203          */
8204         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8205                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8206         else
8207                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8208
8209         /* It seems all chips can get confused if TX buffers
8210          * straddle the 4GB address boundary in some cases.
8211          */
8212         tp->dev->hard_start_xmit = tg3_start_xmit;
8213
8214         tp->rx_offset = 2;
8215         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8216             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8217                 tp->rx_offset = 0;
8218
8219         /* By default, disable wake-on-lan.  User can change this
8220          * using ETHTOOL_SWOL.
8221          */
8222         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8223
8224         return err;
8225 }
8226
8227 #ifdef CONFIG_SPARC64
8228 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8229 {
8230         struct net_device *dev = tp->dev;
8231         struct pci_dev *pdev = tp->pdev;
8232         struct pcidev_cookie *pcp = pdev->sysdata;
8233
8234         if (pcp != NULL) {
8235                 int node = pcp->prom_node;
8236
8237                 if (prom_getproplen(node, "local-mac-address") == 6) {
8238                         prom_getproperty(node, "local-mac-address",
8239                                          dev->dev_addr, 6);
8240                         return 0;
8241                 }
8242         }
8243         return -ENODEV;
8244 }
8245
8246 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8247 {
8248         struct net_device *dev = tp->dev;
8249
8250         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8251         return 0;
8252 }
8253 #endif
8254
8255 static int __devinit tg3_get_device_address(struct tg3 *tp)
8256 {
8257         struct net_device *dev = tp->dev;
8258         u32 hi, lo, mac_offset;
8259
8260 #ifdef CONFIG_SPARC64
8261         if (!tg3_get_macaddr_sparc(tp))
8262                 return 0;
8263 #endif
8264
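             /* The MAC address normally sits at NVRAM offset 0x7c.  On a 5704
              * whose DUAL_MAC_CTRL ID bit marks this function as the second
              * port, it is stored at 0xcc instead.
              */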
8265         mac_offset = 0x7c;
8266         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8267             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8268                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8269                         mac_offset = 0xcc;
8270                 if (tg3_nvram_lock(tp))
8271                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8272                 else
8273                         tg3_nvram_unlock(tp);
8274         }
8275
8276         /* First try to get it from MAC address mailbox. */
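             /* The upper half of the high mailbox word holds 0x484b (ASCII
              * "HK"), which appears to be the signature the boot firmware
              * leaves when it has stored a valid address there.
              */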
8277         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
8278         if ((hi >> 16) == 0x484b) {
8279                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8280                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8281
8282                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8283                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8284                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8285                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8286                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8287         }
8288         /* Next, try NVRAM. */
8289         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8290                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8291                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8292                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8293                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8294                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8295                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8296                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8297                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8298         }
8299         /* Finally just fetch it out of the MAC control regs. */
8300         else {
8301                 hi = tr32(MAC_ADDR_0_HIGH);
8302                 lo = tr32(MAC_ADDR_0_LOW);
8303
8304                 dev->dev_addr[5] = lo & 0xff;
8305                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8306                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8307                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8308                 dev->dev_addr[1] = hi & 0xff;
8309                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8310         }
8311
8312         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8313 #ifdef CONFIG_SPARC64
8314                 if (!tg3_get_default_macaddr_sparc(tp))
8315                         return 0;
8316 #endif
8317                 return -EINVAL;
8318         }
8319         return 0;
8320 }
8321
8322 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8323 {
8324         struct tg3_internal_buffer_desc test_desc;
8325         u32 sram_dma_descs;
8326         int i, ret;
8327
8328         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8329
8330         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8331         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8332         tw32(RDMAC_STATUS, 0);
8333         tw32(WDMAC_STATUS, 0);
8334
8335         tw32(BUFMGR_MODE, 0);
8336         tw32(FTQ_RESET, 0);
8337
8338         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8339         test_desc.addr_lo = buf_dma & 0xffffffff;
8340         test_desc.nic_mbuf = 0x00002100;
8341         test_desc.len = size;
8342
8343         /*
8344          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
8345          * the *second* time the tg3 driver was getting loaded after an
8346          * initial scan.
8347          *
8348          * Broadcom tells me:
8349          *   ...the DMA engine is connected to the GRC block and a DMA
8350          *   reset may affect the GRC block in some unpredictable way...
8351          *   The behavior of resets to individual blocks has not been tested.
8352          *
8353          * Broadcom noted the GRC reset will also reset all sub-components.
8354          */
8355         if (to_device) {
8356                 test_desc.cqid_sqid = (13 << 8) | 2;
8357
8358                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8359                 udelay(40);
8360         } else {
8361                 test_desc.cqid_sqid = (16 << 8) | 7;
8362
8363                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8364                 udelay(40);
8365         }
8366         test_desc.flags = 0x00000005;
8367
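             /* Copy the test descriptor into NIC SRAM one word at a time
              * through the PCI memory-window config registers, then restore
              * the window base.
              */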
8368         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8369                 u32 val;
8370
8371                 val = *(((u32 *)&test_desc) + i);
8372                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8373                                        sram_dma_descs + (i * sizeof(u32)));
8374                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8375         }
8376         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8377
8378         if (to_device) {
8379                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8380         } else {
8381                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8382         }
8383
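             /* Poll the completion FIFO for up to ~4ms (40 x 100us), waiting
              * for our descriptor address to appear.
              */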
8384         ret = -ENODEV;
8385         for (i = 0; i < 40; i++) {
8386                 u32 val;
8387
8388                 if (to_device)
8389                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8390                 else
8391                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8392                 if ((val & 0xffff) == sram_dma_descs) {
8393                         ret = 0;
8394                         break;
8395                 }
8396
8397                 udelay(100);
8398         }
8399
8400         return ret;
8401 }
8402
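     /* 1KB scratch buffer used by the DMA loopback test below. */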
8403 #define TEST_BUFFER_SIZE        0x400
8404
8405 static int __devinit tg3_test_dma(struct tg3 *tp)
8406 {
8407         dma_addr_t buf_dma;
8408         u32 *buf;
8409         int ret;
8410
8411         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8412         if (!buf) {
8413                 ret = -ENOMEM;
8414                 goto out_nofree;
8415         }
8416
8417         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8418                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8419
8420 #ifndef CONFIG_X86
8421         {
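                     /* On non-x86 platforms, derive a DMA write boundary from
                      * the PCI cache line size (a reported value of 0 is
                      * treated as 1024 bytes); x86 skips this and keeps the
                      * default setting.
                      */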
8422                 u8 byte;
8423                 int cacheline_size;
8424                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8425
8426                 if (byte == 0)
8427                         cacheline_size = 1024;
8428                 else
8429                         cacheline_size = (int) byte * 4;
8430
8431                 switch (cacheline_size) {
8432                 case 16:
8433                 case 32:
8434                 case 64:
8435                 case 128:
8436                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8437                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8438                                 tp->dma_rwctrl |=
8439                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8440                                 break;
8441                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8442                                 tp->dma_rwctrl &=
8443                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8444                                 tp->dma_rwctrl |=
8445                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8446                                 break;
8447                         }
8448                         /* fallthrough */
8449                 case 256:
8450                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8451                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8452                                 tp->dma_rwctrl |=
8453                                         DMA_RWCTRL_WRITE_BNDRY_256;
8454                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8455                                 tp->dma_rwctrl |=
8456                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8457                 };
8458         }
8459 #endif
8460
8461         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8462                 /* DMA read watermark not used on PCIE */
8463                 tp->dma_rwctrl |= 0x00180000;
8464         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8465                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8466                         tp->dma_rwctrl |= 0x003f0000;
8467                 else
8468                         tp->dma_rwctrl |= 0x003f000f;
8469         } else {
8470                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8471                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8472                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8473
8474                         if (ccval == 0x6 || ccval == 0x7)
8475                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8476
8477                         /* Set bit 23 to re-enable the PCIX hw bug fix */
8478                         tp->dma_rwctrl |= 0x009f0000;
8479                 } else {
8480                         tp->dma_rwctrl |= 0x001b000f;
8481                 }
8482         }
8483
8484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8485             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8486                 tp->dma_rwctrl &= 0xfffffff0;
8487
8488         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8489             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8490                 /* Remove this if it causes problems for some boards. */
8491                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8492
8493                 /* On 5700/5701 chips, we need to set this bit.
8494                  * Otherwise the chip will issue cacheline transactions
8495                  * to streamable DMA memory with not all the byte
8496                  * enables turned on.  This is an error on several
8497                  * RISC PCI controllers, in particular sparc64.
8498                  *
8499                  * On 5703/5704 chips, this bit has been reassigned
8500                  * a different meaning.  In particular, it is used
8501                  * on those chips to enable a PCI-X workaround.
8502                  */
8503                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8504         }
8505
8506         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8507
8508 #if 0
8509         /* Unneeded, already done by tg3_get_invariants.  */
8510         tg3_switch_clocks(tp);
8511 #endif
8512
8513         ret = 0;
8514         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8515             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8516                 goto out;
8517
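             /* DMA loopback test: fill the buffer with a known pattern, DMA it
              * to the chip and back, then verify it.  If corruption is seen
              * while the write boundary is disabled, switch to a 16-byte
              * boundary and retry; otherwise give up with -ENODEV.
              */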
8518         while (1) {
8519                 u32 *p = buf, i;
8520
8521                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8522                         p[i] = i;
8523
8524                 /* Send the buffer to the chip. */
8525                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8526                 if (ret) {
8527                         printk(KERN_ERR "tg3_test_dma() write to device failed, err %d\n", ret);
8528                         break;
8529                 }
8530
8531 #if 0
8532                 /* validate data reached card RAM correctly. */
8533                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8534                         u32 val;
8535                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8536                         if (le32_to_cpu(val) != p[i]) {
8537                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8538                                 /* ret = -ENODEV here? */
8539                         }
8540                         p[i] = 0;
8541                 }
8542 #endif
8543                 /* Now read it back. */
8544                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8545                 if (ret) {
8546                         printk(KERN_ERR "tg3_test_dma() read from device failed, err %d\n", ret);
8547
8548                         break;
8549                 }
8550
8551                 /* Verify it. */
8552                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8553                         if (p[i] == i)
8554                                 continue;
8555
8556                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8557                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8558                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8559                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8560                                 break;
8561                         } else {
8562                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8563                                 ret = -ENODEV;
8564                                 goto out;
8565                         }
8566                 }
8567
8568                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8569                         /* Success. */
8570                         ret = 0;
8571                         break;
8572                 }
8573         }
8574
8575 out:
8576         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8577 out_nofree:
8578         return ret;
8579 }
8580
8581 static void __devinit tg3_init_link_config(struct tg3 *tp)
8582 {
8583         tp->link_config.advertising =
8584                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8585                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8586                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8587                  ADVERTISED_Autoneg | ADVERTISED_MII);
8588         tp->link_config.speed = SPEED_INVALID;
8589         tp->link_config.duplex = DUPLEX_INVALID;
8590         tp->link_config.autoneg = AUTONEG_ENABLE;
8591         netif_carrier_off(tp->dev);
8592         tp->link_config.active_speed = SPEED_INVALID;
8593         tp->link_config.active_duplex = DUPLEX_INVALID;
8594         tp->link_config.phy_is_low_power = 0;
8595         tp->link_config.orig_speed = SPEED_INVALID;
8596         tp->link_config.orig_duplex = DUPLEX_INVALID;
8597         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8598 }
8599
8600 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8601 {
8602         tp->bufmgr_config.mbuf_read_dma_low_water =
8603                 DEFAULT_MB_RDMA_LOW_WATER;
8604         tp->bufmgr_config.mbuf_mac_rx_low_water =
8605                 DEFAULT_MB_MACRX_LOW_WATER;
8606         tp->bufmgr_config.mbuf_high_water =
8607                 DEFAULT_MB_HIGH_WATER;
8608
8609         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8610                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8611         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8612                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8613         tp->bufmgr_config.mbuf_high_water_jumbo =
8614                 DEFAULT_MB_HIGH_WATER_JUMBO;
8615
8616         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8617         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8618 }
8619
8620 static char * __devinit tg3_phy_string(struct tg3 *tp)
8621 {
8622         switch (tp->phy_id & PHY_ID_MASK) {
8623         case PHY_ID_BCM5400:    return "5400";
8624         case PHY_ID_BCM5401:    return "5401";
8625         case PHY_ID_BCM5411:    return "5411";
8626         case PHY_ID_BCM5701:    return "5701";
8627         case PHY_ID_BCM5703:    return "5703";
8628         case PHY_ID_BCM5704:    return "5704";
8629         case PHY_ID_BCM5705:    return "5705";
8630         case PHY_ID_BCM5750:    return "5750";
8631         case PHY_ID_BCM8002:    return "8002/serdes";
8632         case 0:                 return "serdes";
8633         default:                return "unknown";
8634         };
8635 }
8636
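     /* The 5704 is a dual-MAC device whose two ports appear as separate PCI
      * functions on the same device number; scan the other functions in this
      * slot to find our sibling.
      */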
8637 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8638 {
8639         struct pci_dev *peer;
8640         unsigned int func, devnr = tp->pdev->devfn & ~7;
8641
8642         for (func = 0; func < 8; func++) {
8643                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8644                 if (peer && peer != tp->pdev)
8645                         break;
8646                 pci_dev_put(peer);
8647         }
8648         if (!peer || peer == tp->pdev)
8649                 BUG();
8650
8651         /*
8652          * We don't need to keep the refcount elevated; there's no way
8653          * to remove one half of this device without removing the other
8654          */
8655         pci_dev_put(peer);
8656
8657         return peer;
8658 }
8659
8660 static int __devinit tg3_init_one(struct pci_dev *pdev,
8661                                   const struct pci_device_id *ent)
8662 {
8663         static int tg3_version_printed = 0;
8664         unsigned long tg3reg_base, tg3reg_len;
8665         struct net_device *dev;
8666         struct tg3 *tp;
8667         int i, err, pci_using_dac, pm_cap;
8668
8669         if (tg3_version_printed++ == 0)
8670                 printk(KERN_INFO "%s", version);
8671
8672         err = pci_enable_device(pdev);
8673         if (err) {
8674                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8675                        "aborting.\n");
8676                 return err;
8677         }
8678
8679         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8680                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8681                        "base address, aborting.\n");
8682                 err = -ENODEV;
8683                 goto err_out_disable_pdev;
8684         }
8685
8686         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8687         if (err) {
8688                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8689                        "aborting.\n");
8690                 goto err_out_disable_pdev;
8691         }
8692
8693         pci_set_master(pdev);
8694
8695         /* Find power-management capability. */
8696         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8697         if (pm_cap == 0) {
8698                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8699                        "aborting.\n");
8700                 err = -EIO;
8701                 goto err_out_free_res;
8702         }
8703
8704         /* Configure DMA attributes. */
8705         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8706         if (!err) {
8707                 pci_using_dac = 1;
8708                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8709                 if (err < 0) {
8710                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8711                                "for consistent allocations\n");
8712                         goto err_out_free_res;
8713                 }
8714         } else {
8715                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8716                 if (err) {
8717                         printk(KERN_ERR PFX "No usable DMA configuration, "
8718                                "aborting.\n");
8719                         goto err_out_free_res;
8720                 }
8721                 pci_using_dac = 0;
8722         }
8723
8724         tg3reg_base = pci_resource_start(pdev, 0);
8725         tg3reg_len = pci_resource_len(pdev, 0);
8726
8727         dev = alloc_etherdev(sizeof(*tp));
8728         if (!dev) {
8729                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8730                 err = -ENOMEM;
8731                 goto err_out_free_res;
8732         }
8733
8734         SET_MODULE_OWNER(dev);
8735         SET_NETDEV_DEV(dev, &pdev->dev);
8736
8737         if (pci_using_dac)
8738                 dev->features |= NETIF_F_HIGHDMA;
8739         dev->features |= NETIF_F_LLTX;
8740 #if TG3_VLAN_TAG_USED
8741         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8742         dev->vlan_rx_register = tg3_vlan_rx_register;
8743         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8744 #endif
8745
8746         tp = netdev_priv(dev);
8747         tp->pdev = pdev;
8748         tp->dev = dev;
8749         tp->pm_cap = pm_cap;
8750         tp->mac_mode = TG3_DEF_MAC_MODE;
8751         tp->rx_mode = TG3_DEF_RX_MODE;
8752         tp->tx_mode = TG3_DEF_TX_MODE;
8753         tp->mi_mode = MAC_MI_MODE_BASE;
8754         if (tg3_debug > 0)
8755                 tp->msg_enable = tg3_debug;
8756         else
8757                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8758
8759         /* The word/byte swap controls here control register access byte
8760          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8761          * setting below.
8762          */
8763         tp->misc_host_ctrl =
8764                 MISC_HOST_CTRL_MASK_PCI_INT |
8765                 MISC_HOST_CTRL_WORD_SWAP |
8766                 MISC_HOST_CTRL_INDIR_ACCESS |
8767                 MISC_HOST_CTRL_PCISTATE_RW;
8768
8769         /* The NONFRM (non-frame) byte/word swap controls take effect
8770          * on descriptor entries, anything which isn't packet data.
8771          *
8772          * The StrongARM chips on the board (one for tx, one for rx)
8773          * are running in big-endian mode.
8774          */
8775         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8776                         GRC_MODE_WSWAP_NONFRM_DATA);
8777 #ifdef __BIG_ENDIAN
8778         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8779 #endif
8780         spin_lock_init(&tp->lock);
8781         spin_lock_init(&tp->tx_lock);
8782         spin_lock_init(&tp->indirect_lock);
8783         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8784
8785         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8786         if (tp->regs == 0UL) {
8787                 printk(KERN_ERR PFX "Cannot map device registers, "
8788                        "aborting.\n");
8789                 err = -ENOMEM;
8790                 goto err_out_free_dev;
8791         }
8792
8793         tg3_init_link_config(tp);
8794
8795         tg3_init_bufmgr_config(tp);
8796
8797         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8798         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8799         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8800
8801         dev->open = tg3_open;
8802         dev->stop = tg3_close;
8803         dev->get_stats = tg3_get_stats;
8804         dev->set_multicast_list = tg3_set_rx_mode;
8805         dev->set_mac_address = tg3_set_mac_addr;
8806         dev->do_ioctl = tg3_ioctl;
8807         dev->tx_timeout = tg3_tx_timeout;
8808         dev->poll = tg3_poll;
8809         dev->ethtool_ops = &tg3_ethtool_ops;
8810         dev->weight = 64;
8811         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8812         dev->change_mtu = tg3_change_mtu;
8813         dev->irq = pdev->irq;
8814 #ifdef CONFIG_NET_POLL_CONTROLLER
8815         dev->poll_controller = tg3_poll_controller;
8816 #endif
8817
8818         err = tg3_get_invariants(tp);
8819         if (err) {
8820                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8821                        "aborting.\n");
8822                 goto err_out_iounmap;
8823         }
8824
8825         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8826                 tp->bufmgr_config.mbuf_read_dma_low_water =
8827                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8828                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8829                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8830                 tp->bufmgr_config.mbuf_high_water =
8831                         DEFAULT_MB_HIGH_WATER_5705;
8832         }
8833
8834 #if TG3_TSO_SUPPORT != 0
8835         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
8836                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8837         }
8838         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8839             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8840             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8841             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
8842                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8843         } else {
8844                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8845         }
8846
8847         /* TSO is off by default, user can enable using ethtool.  */
8848 #if 0
8849         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8850                 dev->features |= NETIF_F_TSO;
8851 #endif
8852
8853 #endif
8854
8855         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8856             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8857             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8858                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8859                 tp->rx_pending = 63;
8860         }
8861
8862         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8863                 tp->pdev_peer = tg3_find_5704_peer(tp);
8864
8865         err = tg3_get_device_address(tp);
8866         if (err) {
8867                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8868                        "aborting.\n");
8869                 goto err_out_iounmap;
8870         }
8871
8872         /*
8873          * Reset chip in case UNDI or EFI driver did not shut it down.
8874          * The DMA self test will enable WDMAC and we'll see (spurious)
8875          * pending DMA on the PCI bus at that point.
8876          */
8877         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8878             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8879                 pci_save_state(tp->pdev);
8880                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8881                 tg3_halt(tp);
8882         }
8883
8884         err = tg3_test_dma(tp);
8885         if (err) {
8886                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8887                 goto err_out_iounmap;
8888         }
8889
8890         /* Tigon3 can do ipv4 only... and some chips have buggy
8891          * checksumming.
8892          */
8893         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8894                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8895                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8896         } else
8897                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8898
8899         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8900                 dev->features &= ~NETIF_F_HIGHDMA;
8901
8902         /* flow control autonegotiation is default behavior */
8903         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8904
8905         err = register_netdev(dev);
8906         if (err) {
8907                 printk(KERN_ERR PFX "Cannot register net device, "
8908                        "aborting.\n");
8909                 goto err_out_iounmap;
8910         }
8911
8912         pci_set_drvdata(pdev, dev);
8913
8914         /* Now that we have fully setup the chip, save away a snapshot
8915          * of the PCI config space.  We need to restore this after
8916          * GRC_MISC_CFG core clock resets and some resume events.
8917          */
8918         pci_save_state(tp->pdev);
8919
8920         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8921                dev->name,
8922                tp->board_part_number,
8923                tp->pci_chip_rev_id,
8924                tg3_phy_string(tp),
8925                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8926                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8927                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8928                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8929                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8930                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8931
8932         for (i = 0; i < 6; i++)
8933                 printk("%2.2x%c", dev->dev_addr[i],
8934                        i == 5 ? '\n' : ':');
8935
8936         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8937                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8938                "TSOcap[%d]\n",
8939                dev->name,
8940                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8941                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8942                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8943                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8944                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8945                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8946                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8947
8948         return 0;
8949
8950 err_out_iounmap:
8951         iounmap(tp->regs);
8952
8953 err_out_free_dev:
8954         free_netdev(dev);
8955
8956 err_out_free_res:
8957         pci_release_regions(pdev);
8958
8959 err_out_disable_pdev:
8960         pci_disable_device(pdev);
8961         pci_set_drvdata(pdev, NULL);
8962         return err;
8963 }
8964
8965 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8966 {
8967         struct net_device *dev = pci_get_drvdata(pdev);
8968
8969         if (dev) {
8970                 struct tg3 *tp = netdev_priv(dev);
8971
8972                 unregister_netdev(dev);
8973                 iounmap(tp->regs);
8974                 free_netdev(dev);
8975                 pci_release_regions(pdev);
8976                 pci_disable_device(pdev);
8977                 pci_set_drvdata(pdev, NULL);
8978         }
8979 }
8980
8981 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
8982 {
8983         struct net_device *dev = pci_get_drvdata(pdev);
8984         struct tg3 *tp = netdev_priv(dev);
8985         int err;
8986
8987         if (!netif_running(dev))
8988                 return 0;
8989
8990         tg3_netif_stop(tp);
8991
8992         del_timer_sync(&tp->timer);
8993
8994         spin_lock_irq(&tp->lock);
8995         spin_lock(&tp->tx_lock);
8996         tg3_disable_ints(tp);
8997         spin_unlock(&tp->tx_lock);
8998         spin_unlock_irq(&tp->lock);
8999
9000         netif_device_detach(dev);
9001
9002         spin_lock_irq(&tp->lock);
9003         spin_lock(&tp->tx_lock);
9004         tg3_halt(tp);
9005         spin_unlock(&tp->tx_lock);
9006         spin_unlock_irq(&tp->lock);
9007
9008         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
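             /* If the low-power transition failed, undo the shutdown: re-init
              * the hardware, restart the timer and the queues, and hand the
              * error back to the PM core.
              */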
9009         if (err) {
9010                 spin_lock_irq(&tp->lock);
9011                 spin_lock(&tp->tx_lock);
9012
9013                 tg3_init_hw(tp);
9014
9015                 tp->timer.expires = jiffies + tp->timer_offset;
9016                 add_timer(&tp->timer);
9017
9018                 netif_device_attach(dev);
9019                 tg3_netif_start(tp);
9020
9021                 spin_unlock(&tp->tx_lock);
9022                 spin_unlock_irq(&tp->lock);
9023         }
9024
9025         return err;
9026 }
9027
9028 static int tg3_resume(struct pci_dev *pdev)
9029 {
9030         struct net_device *dev = pci_get_drvdata(pdev);
9031         struct tg3 *tp = netdev_priv(dev);
9032         int err;
9033
9034         if (!netif_running(dev))
9035                 return 0;
9036
9037         pci_restore_state(tp->pdev);
9038
9039         err = tg3_set_power_state(tp, 0);
9040         if (err)
9041                 return err;
9042
9043         netif_device_attach(dev);
9044
9045         spin_lock_irq(&tp->lock);
9046         spin_lock(&tp->tx_lock);
9047
9048         tg3_init_hw(tp);
9049
9050         tp->timer.expires = jiffies + tp->timer_offset;
9051         add_timer(&tp->timer);
9052
9053         tg3_enable_ints(tp);
9054
9055         tg3_netif_start(tp);
9056
9057         spin_unlock(&tp->tx_lock);
9058         spin_unlock_irq(&tp->lock);
9059
9060         return 0;
9061 }
9062
9063 static struct pci_driver tg3_driver = {
9064         .name           = DRV_MODULE_NAME,
9065         .id_table       = tg3_pci_tbl,
9066         .probe          = tg3_init_one,
9067         .remove         = __devexit_p(tg3_remove_one),
9068         .suspend        = tg3_suspend,
9069         .resume         = tg3_resume
9070 };
9071
9072 static int __init tg3_init(void)
9073 {
9074         return pci_module_init(&tg3_driver);
9075 }
9076
9077 static void __exit tg3_cleanup(void)
9078 {
9079         pci_unregister_driver(&tg3_driver);
9080 }
9081
9082 module_init(tg3_init);
9083 module_exit(tg3_cleanup);