1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.26"
65 #define DRV_MODULE_RELDATE      "April 24, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself,
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
126
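/* Illustrative sketch (not part of the driver): because the ring sizes
 * above are compile-time power-of-two constants, an index update written
 * as a modulo compiles down to the same AND mask that NEXT_TX() spells
 * out by hand.  The helper name is hypothetical; the block is kept out
 * of the build with #if 0.
 */
#if 0
static inline u32 example_next_tx_index(u32 index)
{
        /* equivalent to (index + 1) & (TG3_TX_RING_SIZE - 1), i.e. NEXT_TX() */
        return (index + 1) % TG3_TX_RING_SIZE;
}
#endif
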
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { 0, }
232 };
233
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
235
236 static struct {
237         const char string[ETH_GSTRING_LEN];
238 } ethtool_stats_keys[TG3_NUM_STATS] = {
239         { "rx_octets" },
240         { "rx_fragments" },
241         { "rx_ucast_packets" },
242         { "rx_mcast_packets" },
243         { "rx_bcast_packets" },
244         { "rx_fcs_errors" },
245         { "rx_align_errors" },
246         { "rx_xon_pause_rcvd" },
247         { "rx_xoff_pause_rcvd" },
248         { "rx_mac_ctrl_rcvd" },
249         { "rx_xoff_entered" },
250         { "rx_frame_too_long_errors" },
251         { "rx_jabbers" },
252         { "rx_undersize_packets" },
253         { "rx_in_length_errors" },
254         { "rx_out_length_errors" },
255         { "rx_64_or_less_octet_packets" },
256         { "rx_65_to_127_octet_packets" },
257         { "rx_128_to_255_octet_packets" },
258         { "rx_256_to_511_octet_packets" },
259         { "rx_512_to_1023_octet_packets" },
260         { "rx_1024_to_1522_octet_packets" },
261         { "rx_1523_to_2047_octet_packets" },
262         { "rx_2048_to_4095_octet_packets" },
263         { "rx_4096_to_8191_octet_packets" },
264         { "rx_8192_to_9022_octet_packets" },
265
266         { "tx_octets" },
267         { "tx_collisions" },
268
269         { "tx_xon_sent" },
270         { "tx_xoff_sent" },
271         { "tx_flow_control" },
272         { "tx_mac_errors" },
273         { "tx_single_collisions" },
274         { "tx_mult_collisions" },
275         { "tx_deferred" },
276         { "tx_excessive_collisions" },
277         { "tx_late_collisions" },
278         { "tx_collide_2times" },
279         { "tx_collide_3times" },
280         { "tx_collide_4times" },
281         { "tx_collide_5times" },
282         { "tx_collide_6times" },
283         { "tx_collide_7times" },
284         { "tx_collide_8times" },
285         { "tx_collide_9times" },
286         { "tx_collide_10times" },
287         { "tx_collide_11times" },
288         { "tx_collide_12times" },
289         { "tx_collide_13times" },
290         { "tx_collide_14times" },
291         { "tx_collide_15times" },
292         { "tx_ucast_packets" },
293         { "tx_mcast_packets" },
294         { "tx_bcast_packets" },
295         { "tx_carrier_sense_errors" },
296         { "tx_discards" },
297         { "tx_errors" },
298
299         { "dma_writeq_full" },
300         { "dma_write_prioq_full" },
301         { "rxbds_empty" },
302         { "rx_discards" },
303         { "rx_errors" },
304         { "rx_threshold_hit" },
305
306         { "dma_readq_full" },
307         { "dma_read_prioq_full" },
308         { "tx_comp_queue_full" },
309
310         { "ring_set_send_prod_index" },
311         { "ring_status_update" },
312         { "nic_irqs" },
313         { "nic_avoided_irqs" },
314         { "nic_tx_threshold_hit" }
315 };
316
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318 {
319         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320                 unsigned long flags;
321
322                 spin_lock_irqsave(&tp->indirect_lock, flags);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326         } else {
327                 writel(val, tp->regs + off);
328                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329                         readl(tp->regs + off);
330         }
331 }
332
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334 {
335         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336                 unsigned long flags;
337
338                 spin_lock_irqsave(&tp->indirect_lock, flags);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         } else {
343                 void __iomem *dest = tp->regs + off;
344                 writel(val, dest);
345                 readl(dest);    /* always flush PCI write */
346         }
347 }
348
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350 {
351         void __iomem *mbox = tp->regs + off;
352         writel(val, mbox);
353         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354                 readl(mbox);
355 }
356
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         void __iomem *mbox = tp->regs + off;
360         writel(val, mbox);
361         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362                 writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364                 readl(mbox);
365 }
366
367 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
370
371 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg)               readl(tp->regs + (reg))
376 #define tr16(reg)               readw(tp->regs + (reg))
377 #define tr8(reg)                readb(tp->regs + (reg))
378
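/* Illustrative sketch (not part of the driver): typical use of the register
 * accessors defined above.  tw32() goes through the PCI-X target-workaround
 * path when needed, tw32_f() additionally reads the register back to flush
 * the posted PCI write, and tr32() is a plain readl().  The function is
 * hypothetical and kept out of the build with #if 0.
 */
#if 0
static void example_reg_access(struct tg3 *tp)
{
        u32 val;

        tw32(TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);  /* posted write */
        tw32_f(MAC_MI_MODE, tp->mi_mode);                 /* write, then flush */
        udelay(80);
        val = tr32(MAC_MI_COM);                           /* plain readl() */
        (void) val;
}
#endif
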
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393 {
394         unsigned long flags;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400         /* Always leave this as zero. */
401         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402         spin_unlock_irqrestore(&tp->indirect_lock, flags);
403 }
404
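/* Illustrative sketch (not part of the driver): the two helpers above tunnel
 * NIC on-chip SRAM accesses through the PCI memory-window registers under
 * indirect_lock, so callers only supply an SRAM offset.  The offset and the
 * function name below are made up; kept out of the build with #if 0.
 */
#if 0
static void example_sram_access(struct tg3 *tp)
{
        u32 val;

        tg3_write_mem(tp, 0x0c00, 0xdeadbeef);  /* hypothetical SRAM offset */
        tg3_read_mem(tp, 0x0c00, &val);         /* read the word back */
        (void) val;
}
#endif
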
405 static void tg3_disable_ints(struct tg3 *tp)
406 {
407         tw32(TG3PCI_MISC_HOST_CTRL,
408              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
409         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
410         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
411 }
412
413 static inline void tg3_cond_int(struct tg3 *tp)
414 {
415         if (tp->hw_status->status & SD_STATUS_UPDATED)
416                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
417 }
418
419 static void tg3_enable_ints(struct tg3 *tp)
420 {
421         tw32(TG3PCI_MISC_HOST_CTRL,
422              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
424         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
425
426         tg3_cond_int(tp);
427 }
428
429 /* tg3_restart_ints
430  *  similar to tg3_enable_ints, but it can return without flushing the
431  *  PIO write which reenables interrupts
432  */
433 static void tg3_restart_ints(struct tg3 *tp)
434 {
435         tw32(TG3PCI_MISC_HOST_CTRL,
436                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
437         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
438         mmiowb();
439
440         tg3_cond_int(tp);
441 }
442
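/* Illustrative sketch (not part of the driver) of the difference described
 * in the comment above: tg3_enable_ints() follows the mailbox write with a
 * tr32() read-back, which forces the posted PCI write out to the chip before
 * returning, while tg3_restart_ints() only issues mmiowb(), which orders
 * MMIO writes against other CPUs but does not wait for the write to reach
 * the device.  Hypothetical function, kept out of the build with #if 0.
 */
#if 0
static void example_int_reenable(struct tg3 *tp)
{
        /* flushed: interrupts are guaranteed re-enabled on return */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);

        /* not flushed: the write may still be posted when we return */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
        mmiowb();
}
#endif
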
443 static inline void tg3_netif_stop(struct tg3 *tp)
444 {
445         netif_poll_disable(tp->dev);
446         netif_tx_disable(tp->dev);
447 }
448
449 static inline void tg3_netif_start(struct tg3 *tp)
450 {
451         netif_wake_queue(tp->dev);
452         /* NOTE: unconditional netif_wake_queue is only appropriate
453          * so long as all callers are assured to have free tx slots
454          * (such as after tg3_init_hw)
455          */
456         netif_poll_enable(tp->dev);
457         tg3_cond_int(tp);
458 }
459
460 static void tg3_switch_clocks(struct tg3 *tp)
461 {
462         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
463         u32 orig_clock_ctrl;
464
465         orig_clock_ctrl = clock_ctrl;
466         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
467                        CLOCK_CTRL_CLKRUN_OENABLE |
468                        0x1f);
469         tp->pci_clock_ctrl = clock_ctrl;
470
471         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
472                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
473                         tw32_f(TG3PCI_CLOCK_CTRL,
474                                clock_ctrl | CLOCK_CTRL_625_CORE);
475                         udelay(40);
476                 }
477         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
478                 tw32_f(TG3PCI_CLOCK_CTRL,
479                      clock_ctrl |
480                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
481                 udelay(40);
482                 tw32_f(TG3PCI_CLOCK_CTRL,
483                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
484                 udelay(40);
485         }
486         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
487         udelay(40);
488 }
489
490 #define PHY_BUSY_LOOPS  5000
491
492 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
493 {
494         u32 frame_val;
495         unsigned int loops;
496         int ret;
497
498         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
499                 tw32_f(MAC_MI_MODE,
500                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
501                 udelay(80);
502         }
503
504         *val = 0x0;
505
506         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
507                       MI_COM_PHY_ADDR_MASK);
508         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
509                       MI_COM_REG_ADDR_MASK);
510         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
511
512         tw32_f(MAC_MI_COM, frame_val);
513
514         loops = PHY_BUSY_LOOPS;
515         while (loops != 0) {
516                 udelay(10);
517                 frame_val = tr32(MAC_MI_COM);
518
519                 if ((frame_val & MI_COM_BUSY) == 0) {
520                         udelay(5);
521                         frame_val = tr32(MAC_MI_COM);
522                         break;
523                 }
524                 loops -= 1;
525         }
526
527         ret = -EBUSY;
528         if (loops != 0) {
529                 *val = frame_val & MI_COM_DATA_MASK;
530                 ret = 0;
531         }
532
533         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
534                 tw32_f(MAC_MI_MODE, tp->mi_mode);
535                 udelay(80);
536         }
537
538         return ret;
539 }
540
541 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
542 {
543         u32 frame_val;
544         unsigned int loops;
545         int ret;
546
547         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
548                 tw32_f(MAC_MI_MODE,
549                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
550                 udelay(80);
551         }
552
553         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
554                       MI_COM_PHY_ADDR_MASK);
555         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
556                       MI_COM_REG_ADDR_MASK);
557         frame_val |= (val & MI_COM_DATA_MASK);
558         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
559
560         tw32_f(MAC_MI_COM, frame_val);
561
562         loops = PHY_BUSY_LOOPS;
563         while (loops != 0) {
564                 udelay(10);
565                 frame_val = tr32(MAC_MI_COM);
566                 if ((frame_val & MI_COM_BUSY) == 0) {
567                         udelay(5);
568                         frame_val = tr32(MAC_MI_COM);
569                         break;
570                 }
571                 loops -= 1;
572         }
573
574         ret = -EBUSY;
575         if (loops != 0)
576                 ret = 0;
577
578         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
579                 tw32_f(MAC_MI_MODE, tp->mi_mode);
580                 udelay(80);
581         }
582
583         return ret;
584 }
585
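/* Illustrative sketch (not part of the driver): how the MDIO helpers above
 * are used.  Both return 0 on success and -EBUSY if MI_COM stays busy;
 * reads deliver the 16-bit register value through *val.  The function name
 * is hypothetical; kept out of the build with #if 0.
 */
#if 0
static int example_phy_poke(struct tg3 *tp)
{
        u32 bmsr;

        if (tg3_readphy(tp, MII_BMSR, &bmsr))
                return -EBUSY;

        /* the same write tg3_phy_copper_begin() uses to restart autoneg */
        return tg3_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
}
#endif
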
586 static void tg3_phy_set_wirespeed(struct tg3 *tp)
587 {
588         u32 val;
589
590         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
591                 return;
592
593         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
594             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
595                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
596                              (val | (1 << 15) | (1 << 4)));
597 }
598
599 static int tg3_bmcr_reset(struct tg3 *tp)
600 {
601         u32 phy_control;
602         int limit, err;
603
604         /* OK, reset it, and poll the BMCR_RESET bit until it
605          * clears or we time out.
606          */
607         phy_control = BMCR_RESET;
608         err = tg3_writephy(tp, MII_BMCR, phy_control);
609         if (err != 0)
610                 return -EBUSY;
611
612         limit = 5000;
613         while (limit--) {
614                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
615                 if (err != 0)
616                         return -EBUSY;
617
618                 if ((phy_control & BMCR_RESET) == 0) {
619                         udelay(40);
620                         break;
621                 }
622                 udelay(10);
623         }
624         if (limit < 0)
625                 return -EBUSY;
626
627         return 0;
628 }
629
630 static int tg3_wait_macro_done(struct tg3 *tp)
631 {
632         int limit = 100;
633
634         while (limit--) {
635                 u32 tmp32;
636
637                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
638                         if ((tmp32 & 0x1000) == 0)
639                                 break;
640                 }
641         }
642         if (limit < 0)
643                 return -EBUSY;
644
645         return 0;
646 }
647
648 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
649 {
650         static const u32 test_pat[4][6] = {
651         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
652         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
653         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
654         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
655         };
656         int chan;
657
658         for (chan = 0; chan < 4; chan++) {
659                 int i;
660
661                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
662                              (chan * 0x2000) | 0x0200);
663                 tg3_writephy(tp, 0x16, 0x0002);
664
665                 for (i = 0; i < 6; i++)
666                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
667                                      test_pat[chan][i]);
668
669                 tg3_writephy(tp, 0x16, 0x0202);
670                 if (tg3_wait_macro_done(tp)) {
671                         *resetp = 1;
672                         return -EBUSY;
673                 }
674
675                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
676                              (chan * 0x2000) | 0x0200);
677                 tg3_writephy(tp, 0x16, 0x0082);
678                 if (tg3_wait_macro_done(tp)) {
679                         *resetp = 1;
680                         return -EBUSY;
681                 }
682
683                 tg3_writephy(tp, 0x16, 0x0802);
684                 if (tg3_wait_macro_done(tp)) {
685                         *resetp = 1;
686                         return -EBUSY;
687                 }
688
689                 for (i = 0; i < 6; i += 2) {
690                         u32 low, high;
691
692                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
693                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
694                             tg3_wait_macro_done(tp)) {
695                                 *resetp = 1;
696                                 return -EBUSY;
697                         }
698                         low &= 0x7fff;
699                         high &= 0x000f;
700                         if (low != test_pat[chan][i] ||
701                             high != test_pat[chan][i+1]) {
702                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
703                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
704                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
705
706                                 return -EBUSY;
707                         }
708                 }
709         }
710
711         return 0;
712 }
713
714 static int tg3_phy_reset_chanpat(struct tg3 *tp)
715 {
716         int chan;
717
718         for (chan = 0; chan < 4; chan++) {
719                 int i;
720
721                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
722                              (chan * 0x2000) | 0x0200);
723                 tg3_writephy(tp, 0x16, 0x0002);
724                 for (i = 0; i < 6; i++)
725                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
726                 tg3_writephy(tp, 0x16, 0x0202);
727                 if (tg3_wait_macro_done(tp))
728                         return -EBUSY;
729         }
730
731         return 0;
732 }
733
734 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
735 {
736         u32 reg32, phy9_orig;
737         int retries, do_phy_reset, err;
738
739         retries = 10;
740         do_phy_reset = 1;
741         do {
742                 if (do_phy_reset) {
743                         err = tg3_bmcr_reset(tp);
744                         if (err)
745                                 return err;
746                         do_phy_reset = 0;
747                 }
748
749                 /* Disable transmitter and interrupt.  */
750                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
751                         continue;
752
753                 reg32 |= 0x3000;
754                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
755
756                 /* Set full-duplex, 1000 mbps.  */
757                 tg3_writephy(tp, MII_BMCR,
758                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
759
760                 /* Set to master mode.  */
761                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
762                         continue;
763
764                 tg3_writephy(tp, MII_TG3_CTRL,
765                              (MII_TG3_CTRL_AS_MASTER |
766                               MII_TG3_CTRL_ENABLE_AS_MASTER));
767
768                 /* Enable SM_DSP_CLOCK and 6dB.  */
769                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
770
771                 /* Block the PHY control access.  */
772                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
773                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
774
775                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
776                 if (!err)
777                         break;
778         } while (--retries);
779
780         err = tg3_phy_reset_chanpat(tp);
781         if (err)
782                 return err;
783
784         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
785         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
786
787         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
788         tg3_writephy(tp, 0x16, 0x0000);
789
790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
792                 /* Set Extended packet length bit for jumbo frames */
793                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
794         }
795         else {
796                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
797         }
798
799         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
800
801         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
802                 reg32 &= ~0x3000;
803                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
804         } else if (!err)
805                 err = -EBUSY;
806
807         return err;
808 }
809
810 /* This will reset the tigon3 PHY.  The "only reset when there is no
811  * valid link, unless forced" policy is applied by the caller (see the
812  * force_reset argument of tg3_setup_copper_phy()), not here.  */
813 static int tg3_phy_reset(struct tg3 *tp)
814 {
815         u32 phy_status;
816         int err;
817
818         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
819         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
820         if (err != 0)
821                 return -EBUSY;
822
823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
825             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
826                 err = tg3_phy_reset_5703_4_5(tp);
827                 if (err)
828                         return err;
829                 goto out;
830         }
831
832         err = tg3_bmcr_reset(tp);
833         if (err)
834                 return err;
835
836 out:
837         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
838                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
839                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
840                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
841                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
842                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
843                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
844         }
845         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
846                 tg3_writephy(tp, 0x1c, 0x8d68);
847                 tg3_writephy(tp, 0x1c, 0x8d68);
848         }
849         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
850                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
851                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
852                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
853                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
854                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
855                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
856                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
857                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
858         }
859         /* Set Extended packet length bit (bit 14) on all chips that
860          * support jumbo frames. */
861         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
862                 /* Cannot do read-modify-write on 5401 */
863                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
864         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
865                 u32 phy_reg;
866
867                 /* Set bit 14 with read-modify-write to preserve other bits */
868                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
869                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
870                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
871         }
872
873         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
874          * jumbo frames transmission.
875          */
876         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
877                 u32 phy_reg;
878
879                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
880                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
881                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
882         }
883
884         tg3_phy_set_wirespeed(tp);
885         return 0;
886 }
887
888 static void tg3_frob_aux_power(struct tg3 *tp)
889 {
890         struct tg3 *tp_peer = tp;
891
892         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
893                 return;
894
895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
896                 tp_peer = pci_get_drvdata(tp->pdev_peer);
897                 if (!tp_peer)
898                         BUG();
899         }
900
901
902         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
903             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
904                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
905                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
906                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
907                              (GRC_LCLCTRL_GPIO_OE0 |
908                               GRC_LCLCTRL_GPIO_OE1 |
909                               GRC_LCLCTRL_GPIO_OE2 |
910                               GRC_LCLCTRL_GPIO_OUTPUT0 |
911                               GRC_LCLCTRL_GPIO_OUTPUT1));
912                         udelay(100);
913                 } else {
914                         u32 no_gpio2;
915                         u32 grc_local_ctrl;
916
917                         if (tp_peer != tp &&
918                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
919                                 return;
920
921                         /* On 5753 and variants, GPIO2 cannot be used. */
922                         no_gpio2 = tp->nic_sram_data_cfg &
923                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
924
925                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
926                                          GRC_LCLCTRL_GPIO_OE1 |
927                                          GRC_LCLCTRL_GPIO_OE2 |
928                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
929                                          GRC_LCLCTRL_GPIO_OUTPUT2;
930                         if (no_gpio2) {
931                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
932                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
933                         }
934                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
935                                                 grc_local_ctrl);
936                         udelay(100);
937
938                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
939
940                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
941                                                 grc_local_ctrl);
942                         udelay(100);
943
944                         if (!no_gpio2) {
945                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
946                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
947                                        grc_local_ctrl);
948                                 udelay(100);
949                         }
950                 }
951         } else {
952                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
953                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
954                         if (tp_peer != tp &&
955                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
956                                 return;
957
958                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
959                              (GRC_LCLCTRL_GPIO_OE1 |
960                               GRC_LCLCTRL_GPIO_OUTPUT1));
961                         udelay(100);
962
963                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
964                              (GRC_LCLCTRL_GPIO_OE1));
965                         udelay(100);
966
967                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
968                              (GRC_LCLCTRL_GPIO_OE1 |
969                               GRC_LCLCTRL_GPIO_OUTPUT1));
970                         udelay(100);
971                 }
972         }
973 }
974
975 static int tg3_setup_phy(struct tg3 *, int);
976
977 #define RESET_KIND_SHUTDOWN     0
978 #define RESET_KIND_INIT         1
979 #define RESET_KIND_SUSPEND      2
980
981 static void tg3_write_sig_post_reset(struct tg3 *, int);
982 static int tg3_halt_cpu(struct tg3 *, u32);
983
984 static int tg3_set_power_state(struct tg3 *tp, int state)
985 {
986         u32 misc_host_ctrl;
987         u16 power_control, power_caps;
988         int pm = tp->pm_cap;
989
990         /* Make sure register accesses (indirect or otherwise)
991          * will function correctly.
992          */
993         pci_write_config_dword(tp->pdev,
994                                TG3PCI_MISC_HOST_CTRL,
995                                tp->misc_host_ctrl);
996
997         pci_read_config_word(tp->pdev,
998                              pm + PCI_PM_CTRL,
999                              &power_control);
1000         power_control |= PCI_PM_CTRL_PME_STATUS;
1001         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1002         switch (state) {
1003         case 0:
1004                 power_control |= 0;
1005                 pci_write_config_word(tp->pdev,
1006                                       pm + PCI_PM_CTRL,
1007                                       power_control);
1008                 udelay(100);    /* Delay after power state change */
1009
1010                 /* Switch out of Vaux if it is not a LOM */
1011                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1012                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1013                         udelay(100);
1014                 }
1015
1016                 return 0;
1017
1018         case 1:
1019                 power_control |= 1;
1020                 break;
1021
1022         case 2:
1023                 power_control |= 2;
1024                 break;
1025
1026         case 3:
1027                 power_control |= 3;
1028                 break;
1029
1030         default:
1031                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1032                        "requested.\n",
1033                        tp->dev->name, state);
1034                 return -EINVAL;
1035         };
1036
1037         power_control |= PCI_PM_CTRL_PME_ENABLE;
1038
1039         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1040         tw32(TG3PCI_MISC_HOST_CTRL,
1041              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1042
1043         if (tp->link_config.phy_is_low_power == 0) {
1044                 tp->link_config.phy_is_low_power = 1;
1045                 tp->link_config.orig_speed = tp->link_config.speed;
1046                 tp->link_config.orig_duplex = tp->link_config.duplex;
1047                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1048         }
1049
1050         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1051                 tp->link_config.speed = SPEED_10;
1052                 tp->link_config.duplex = DUPLEX_HALF;
1053                 tp->link_config.autoneg = AUTONEG_ENABLE;
1054                 tg3_setup_phy(tp, 0);
1055         }
1056
1057         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1058
1059         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1060                 u32 mac_mode;
1061
1062                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1063                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1064                         udelay(40);
1065
1066                         mac_mode = MAC_MODE_PORT_MODE_MII;
1067
1068                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1069                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1070                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1071                 } else {
1072                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1073                 }
1074
1075                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1076                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1077
1078                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1079                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1080                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1081
1082                 tw32_f(MAC_MODE, mac_mode);
1083                 udelay(100);
1084
1085                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1086                 udelay(10);
1087         }
1088
1089         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1090             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1091              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1092                 u32 base_val;
1093
1094                 base_val = tp->pci_clock_ctrl;
1095                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1096                              CLOCK_CTRL_TXCLK_DISABLE);
1097
1098                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1099                      CLOCK_CTRL_ALTCLK |
1100                      CLOCK_CTRL_PWRDOWN_PLL133);
1101                 udelay(40);
1102         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1103                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1104                 u32 newbits1, newbits2;
1105
1106                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1107                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1108                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1109                                     CLOCK_CTRL_TXCLK_DISABLE |
1110                                     CLOCK_CTRL_ALTCLK);
1111                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1112                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1113                         newbits1 = CLOCK_CTRL_625_CORE;
1114                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1115                 } else {
1116                         newbits1 = CLOCK_CTRL_ALTCLK;
1117                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1118                 }
1119
1120                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1121                 udelay(40);
1122
1123                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1124                 udelay(40);
1125
1126                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1127                         u32 newbits3;
1128
1129                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1130                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1131                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1132                                             CLOCK_CTRL_TXCLK_DISABLE |
1133                                             CLOCK_CTRL_44MHZ_CORE);
1134                         } else {
1135                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1136                         }
1137
1138                         tw32_f(TG3PCI_CLOCK_CTRL,
1139                                          tp->pci_clock_ctrl | newbits3);
1140                         udelay(40);
1141                 }
1142         }
1143
1144         tg3_frob_aux_power(tp);
1145
1146         /* Workaround for unstable PLL clock */
1147         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1148             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1149                 u32 val = tr32(0x7d00);
1150
1151                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1152                 tw32(0x7d00, val);
1153                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1154                         tg3_halt_cpu(tp, RX_CPU_BASE);
1155         }
1156
1157         /* Finally, set the new power state. */
1158         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1159         udelay(100);    /* Delay after power state change */
1160
1161         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1162
1163         return 0;
1164 }
1165
1166 static void tg3_link_report(struct tg3 *tp)
1167 {
1168         if (!netif_carrier_ok(tp->dev)) {
1169                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1170         } else {
1171                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1172                        tp->dev->name,
1173                        (tp->link_config.active_speed == SPEED_1000 ?
1174                         1000 :
1175                         (tp->link_config.active_speed == SPEED_100 ?
1176                          100 : 10)),
1177                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1178                         "full" : "half"));
1179
1180                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1181                        "%s for RX.\n",
1182                        tp->dev->name,
1183                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1184                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1185         }
1186 }
1187
1188 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1189 {
1190         u32 new_tg3_flags = 0;
1191         u32 old_rx_mode = tp->rx_mode;
1192         u32 old_tx_mode = tp->tx_mode;
1193
1194         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1195                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1196                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1197                                 if (remote_adv & LPA_PAUSE_CAP)
1198                                         new_tg3_flags |=
1199                                                 (TG3_FLAG_RX_PAUSE |
1200                                                 TG3_FLAG_TX_PAUSE);
1201                                 else if (remote_adv & LPA_PAUSE_ASYM)
1202                                         new_tg3_flags |=
1203                                                 (TG3_FLAG_RX_PAUSE);
1204                         } else {
1205                                 if (remote_adv & LPA_PAUSE_CAP)
1206                                         new_tg3_flags |=
1207                                                 (TG3_FLAG_RX_PAUSE |
1208                                                 TG3_FLAG_TX_PAUSE);
1209                         }
1210                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1211                         if ((remote_adv & LPA_PAUSE_CAP) &&
1212                         (remote_adv & LPA_PAUSE_ASYM))
1213                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1214                 }
1215
1216                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1217                 tp->tg3_flags |= new_tg3_flags;
1218         } else {
1219                 new_tg3_flags = tp->tg3_flags;
1220         }
1221
1222         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1223                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1224         else
1225                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1226
1227         if (old_rx_mode != tp->rx_mode) {
1228                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1229         }
1230
1231         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1232                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1233         else
1234                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1235
1236         if (old_tx_mode != tp->tx_mode) {
1237                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1238         }
1239 }
1240
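/* Illustrative sketch (not part of the driver): one common outcome of the
 * resolution above.  Assuming TG3_FLAG_PAUSE_AUTONEG is set, a local
 * advertisement of symmetric pause and a link partner that also advertises
 * symmetric pause enables flow control in both directions: TG3_FLAG_RX_PAUSE
 * and TG3_FLAG_TX_PAUSE are both set and the FLOW_CTRL_ENABLE bits are
 * programmed into MAC_RX_MODE / MAC_TX_MODE.  Hypothetical function, kept
 * out of the build with #if 0.
 */
#if 0
static void example_flow_control(struct tg3 *tp)
{
        tg3_setup_flow_control(tp, ADVERTISE_PAUSE_CAP, LPA_PAUSE_CAP);
}
#endif
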
1241 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1242 {
1243         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1244         case MII_TG3_AUX_STAT_10HALF:
1245                 *speed = SPEED_10;
1246                 *duplex = DUPLEX_HALF;
1247                 break;
1248
1249         case MII_TG3_AUX_STAT_10FULL:
1250                 *speed = SPEED_10;
1251                 *duplex = DUPLEX_FULL;
1252                 break;
1253
1254         case MII_TG3_AUX_STAT_100HALF:
1255                 *speed = SPEED_100;
1256                 *duplex = DUPLEX_HALF;
1257                 break;
1258
1259         case MII_TG3_AUX_STAT_100FULL:
1260                 *speed = SPEED_100;
1261                 *duplex = DUPLEX_FULL;
1262                 break;
1263
1264         case MII_TG3_AUX_STAT_1000HALF:
1265                 *speed = SPEED_1000;
1266                 *duplex = DUPLEX_HALF;
1267                 break;
1268
1269         case MII_TG3_AUX_STAT_1000FULL:
1270                 *speed = SPEED_1000;
1271                 *duplex = DUPLEX_FULL;
1272                 break;
1273
1274         default:
1275                 *speed = SPEED_INVALID;
1276                 *duplex = DUPLEX_INVALID;
1277                 break;
1278         };
1279 }
1280
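/* Illustrative sketch (not part of the driver): decoding the PHY aux status
 * word the way the copper link-setup path does.  Variable and function
 * names are hypothetical; kept out of the build with #if 0.
 */
#if 0
static void example_decode_aux_stat(struct tg3 *tp, u32 aux_stat)
{
        u16 current_speed;
        u8 current_duplex;

        tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                     &current_speed, &current_duplex);
        /* the default case yields SPEED_INVALID / DUPLEX_INVALID */
}
#endif
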
1281 static void tg3_phy_copper_begin(struct tg3 *tp)
1282 {
1283         u32 new_adv;
1284         int i;
1285
1286         if (tp->link_config.phy_is_low_power) {
1287                 /* Entering low power mode.  Disable gigabit and
1288                  * 100baseT advertisements.
1289                  */
1290                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1291
1292                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1293                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1294                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1295                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1296
1297                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1298         } else if (tp->link_config.speed == SPEED_INVALID) {
1299                 tp->link_config.advertising =
1300                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1301                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1302                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1303                          ADVERTISED_Autoneg | ADVERTISED_MII);
1304
1305                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1306                         tp->link_config.advertising &=
1307                                 ~(ADVERTISED_1000baseT_Half |
1308                                   ADVERTISED_1000baseT_Full);
1309
1310                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1311                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1312                         new_adv |= ADVERTISE_10HALF;
1313                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1314                         new_adv |= ADVERTISE_10FULL;
1315                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1316                         new_adv |= ADVERTISE_100HALF;
1317                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1318                         new_adv |= ADVERTISE_100FULL;
1319                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1320
1321                 if (tp->link_config.advertising &
1322                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1323                         new_adv = 0;
1324                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1325                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1326                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1327                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1328                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1329                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1330                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1331                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1332                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1333                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1334                 } else {
1335                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1336                 }
1337         } else {
1338                 /* Asking for a specific link mode. */
1339                 if (tp->link_config.speed == SPEED_1000) {
1340                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1341                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1342
1343                         if (tp->link_config.duplex == DUPLEX_FULL)
1344                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1345                         else
1346                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1347                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1348                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1349                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1350                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1351                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1352                 } else {
1353                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1354
1355                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1356                         if (tp->link_config.speed == SPEED_100) {
1357                                 if (tp->link_config.duplex == DUPLEX_FULL)
1358                                         new_adv |= ADVERTISE_100FULL;
1359                                 else
1360                                         new_adv |= ADVERTISE_100HALF;
1361                         } else {
1362                                 if (tp->link_config.duplex == DUPLEX_FULL)
1363                                         new_adv |= ADVERTISE_10FULL;
1364                                 else
1365                                         new_adv |= ADVERTISE_10HALF;
1366                         }
1367                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1368                 }
1369         }
1370
1371         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1372             tp->link_config.speed != SPEED_INVALID) {
1373                 u32 bmcr, orig_bmcr;
1374
1375                 tp->link_config.active_speed = tp->link_config.speed;
1376                 tp->link_config.active_duplex = tp->link_config.duplex;
1377
1378                 bmcr = 0;
1379                 switch (tp->link_config.speed) {
1380                 default:
1381                 case SPEED_10:
1382                         break;
1383
1384                 case SPEED_100:
1385                         bmcr |= BMCR_SPEED100;
1386                         break;
1387
1388                 case SPEED_1000:
1389                         bmcr |= TG3_BMCR_SPEED1000;
1390                         break;
1391                 }
1392
1393                 if (tp->link_config.duplex == DUPLEX_FULL)
1394                         bmcr |= BMCR_FULLDPLX;
1395
1396                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1397                     (bmcr != orig_bmcr)) {
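                        /* Force the link down by putting the PHY into
                         * loopback, wait for the link to actually drop,
                         * then program the requested speed/duplex.
                         */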
1398                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1399                         for (i = 0; i < 1500; i++) {
1400                                 u32 tmp;
1401
1402                                 udelay(10);
1403                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1404                                     tg3_readphy(tp, MII_BMSR, &tmp))
1405                                         continue;
1406                                 if (!(tmp & BMSR_LSTATUS)) {
1407                                         udelay(40);
1408                                         break;
1409                                 }
1410                         }
1411                         tg3_writephy(tp, MII_BMCR, bmcr);
1412                         udelay(40);
1413                 }
1414         } else {
1415                 tg3_writephy(tp, MII_BMCR,
1416                              BMCR_ANENABLE | BMCR_ANRESTART);
1417         }
1418 }
1419
1420 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1421 {
1422         int err;
1423
1424         /* Turn off tap power management. */
1425         /* Set Extended packet length bit */
1426         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1427
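        /* Each DSP access below selects a register via MII_TG3_DSP_ADDRESS
         * and then writes its value through MII_TG3_DSP_RW_PORT.
         */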
1428         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1429         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1430
1431         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1432         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1433
1434         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1435         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1436
1437         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1438         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1439
1440         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1441         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1442
1443         udelay(40);
1444
1445         return err;
1446 }
1447
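/* Return 1 only if the PHY is currently advertising every 10/100 half/full
 * duplex mode and, unless the chip is 10/100-only, both 1000 modes as well.
 */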
1448 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1449 {
1450         u32 adv_reg, all_mask;
1451
1452         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1453                 return 0;
1454
1455         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1456                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1457         if ((adv_reg & all_mask) != all_mask)
1458                 return 0;
1459         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1460                 u32 tg3_ctrl;
1461
1462                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1463                         return 0;
1464
1465                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1466                             MII_TG3_CTRL_ADV_1000_FULL);
1467                 if ((tg3_ctrl & all_mask) != all_mask)
1468                         return 0;
1469         }
1470         return 1;
1471 }
1472
1473 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1474 {
1475         int current_link_up;
1476         u32 bmsr, dummy;
1477         u16 current_speed;
1478         u8 current_duplex;
1479         int i, err;
1480
1481         tw32(MAC_EVENT, 0);
1482
1483         tw32_f(MAC_STATUS,
1484              (MAC_STATUS_SYNC_CHANGED |
1485               MAC_STATUS_CFG_CHANGED |
1486               MAC_STATUS_MI_COMPLETION |
1487               MAC_STATUS_LNKSTATE_CHANGED));
1488         udelay(40);
1489
1490         tp->mi_mode = MAC_MI_MODE_BASE;
1491         tw32_f(MAC_MI_MODE, tp->mi_mode);
1492         udelay(80);
1493
1494         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1495
1496         /* Some third-party PHYs need to be reset on link going
1497          * down.
1498          */
1499         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1500              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1501              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1502             netif_carrier_ok(tp->dev)) {
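                /* Link status in BMSR is latched, so read it twice to get
                 * the current state.
                 */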
1503                 tg3_readphy(tp, MII_BMSR, &bmsr);
1504                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1505                     !(bmsr & BMSR_LSTATUS))
1506                         force_reset = 1;
1507         }
1508         if (force_reset)
1509                 tg3_phy_reset(tp);
1510
1511         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1512                 tg3_readphy(tp, MII_BMSR, &bmsr);
1513                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1514                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1515                         bmsr = 0;
1516
1517                 if (!(bmsr & BMSR_LSTATUS)) {
1518                         err = tg3_init_5401phy_dsp(tp);
1519                         if (err)
1520                                 return err;
1521
1522                         tg3_readphy(tp, MII_BMSR, &bmsr);
1523                         for (i = 0; i < 1000; i++) {
1524                                 udelay(10);
1525                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1526                                     (bmsr & BMSR_LSTATUS)) {
1527                                         udelay(40);
1528                                         break;
1529                                 }
1530                         }
1531
1532                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1533                             !(bmsr & BMSR_LSTATUS) &&
1534                             tp->link_config.active_speed == SPEED_1000) {
1535                                 err = tg3_phy_reset(tp);
1536                                 if (!err)
1537                                         err = tg3_init_5401phy_dsp(tp);
1538                                 if (err)
1539                                         return err;
1540                         }
1541                 }
1542         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1543                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1544                 /* 5701 {A0,B0} CRC bug workaround */
1545                 tg3_writephy(tp, 0x15, 0x0a75);
1546                 tg3_writephy(tp, 0x1c, 0x8c68);
1547                 tg3_writephy(tp, 0x1c, 0x8d68);
1548                 tg3_writephy(tp, 0x1c, 0x8c68);
1549         }
1550
1551         /* Clear pending interrupts... */
1552         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1553         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1554
1555         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1556                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1557         else
1558                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1559
1560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1561             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1562                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1563                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1564                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1565                 else
1566                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1567         }
1568
1569         current_link_up = 0;
1570         current_speed = SPEED_INVALID;
1571         current_duplex = DUPLEX_INVALID;
1572
1573         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1574                 u32 val;
1575
1576                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1577                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1578                 if (!(val & (1 << 10))) {
1579                         val |= (1 << 10);
1580                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1581                         goto relink;
1582                 }
1583         }
1584
1585         bmsr = 0;
1586         for (i = 0; i < 100; i++) {
1587                 tg3_readphy(tp, MII_BMSR, &bmsr);
1588                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1589                     (bmsr & BMSR_LSTATUS))
1590                         break;
1591                 udelay(40);
1592         }
1593
1594         if (bmsr & BMSR_LSTATUS) {
1595                 u32 aux_stat, bmcr;
1596
1597                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1598                 for (i = 0; i < 2000; i++) {
1599                         udelay(10);
1600                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1601                             aux_stat)
1602                                 break;
1603                 }
1604
1605                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1606                                              &current_speed,
1607                                              &current_duplex);
1608
1609                 bmcr = 0;
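                /* Wait for BMCR to read back a plausible, stable value
                 * (neither 0 nor 0x7fff) before acting on it.
                 */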
1610                 for (i = 0; i < 200; i++) {
1611                         tg3_readphy(tp, MII_BMCR, &bmcr);
1612                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1613                                 continue;
1614                         if (bmcr && bmcr != 0x7fff)
1615                                 break;
1616                         udelay(10);
1617                 }
1618
1619                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1620                         if (bmcr & BMCR_ANENABLE) {
1621                                 current_link_up = 1;
1622
1623                                 /* Force autoneg restart if we are exiting
1624                                  * low power mode.
1625                                  */
1626                                 if (!tg3_copper_is_advertising_all(tp))
1627                                         current_link_up = 0;
1628                         } else {
1629                                 current_link_up = 0;
1630                         }
1631                 } else {
1632                         if (!(bmcr & BMCR_ANENABLE) &&
1633                             tp->link_config.speed == current_speed &&
1634                             tp->link_config.duplex == current_duplex) {
1635                                 current_link_up = 1;
1636                         } else {
1637                                 current_link_up = 0;
1638                         }
1639                 }
1640
1641                 tp->link_config.active_speed = current_speed;
1642                 tp->link_config.active_duplex = current_duplex;
1643         }
1644
1645         if (current_link_up == 1 &&
1646             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1647             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1648                 u32 local_adv, remote_adv;
1649
1650                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1651                         local_adv = 0;
1652                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1653
1654                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1655                         remote_adv = 0;
1656
1657                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1658
1659                 /* If we are not advertising full pause capability,
1660                  * something is wrong.  Bring the link down and reconfigure.
1661                  */
1662                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1663                         current_link_up = 0;
1664                 } else {
1665                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1666                 }
1667         }
1668 relink:
1669         if (current_link_up == 0) {
1670                 u32 tmp;
1671
1672                 tg3_phy_copper_begin(tp);
1673
1674                 tg3_readphy(tp, MII_BMSR, &tmp);
1675                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1676                     (tmp & BMSR_LSTATUS))
1677                         current_link_up = 1;
1678         }
1679
1680         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1681         if (current_link_up == 1) {
1682                 if (tp->link_config.active_speed == SPEED_100 ||
1683                     tp->link_config.active_speed == SPEED_10)
1684                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1685                 else
1686                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1687         } else
1688                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1689
1690         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1691         if (tp->link_config.active_duplex == DUPLEX_HALF)
1692                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1693
1694         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1696                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1697                     (current_link_up == 1 &&
1698                      tp->link_config.active_speed == SPEED_10))
1699                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1700         } else {
1701                 if (current_link_up == 1)
1702                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1703         }
1704
1705         /* ??? Without this setting Netgear GA302T PHY does not
1706          * ??? send/receive packets...
1707          */
1708         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1709             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1710                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1711                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1712                 udelay(80);
1713         }
1714
1715         tw32_f(MAC_MODE, tp->mac_mode);
1716         udelay(40);
1717
1718         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1719                 /* Polled via timer. */
1720                 tw32_f(MAC_EVENT, 0);
1721         } else {
1722                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1723         }
1724         udelay(40);
1725
1726         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1727             current_link_up == 1 &&
1728             tp->link_config.active_speed == SPEED_1000 &&
1729             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1730              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1731                 udelay(120);
1732                 tw32_f(MAC_STATUS,
1733                      (MAC_STATUS_SYNC_CHANGED |
1734                       MAC_STATUS_CFG_CHANGED));
1735                 udelay(40);
1736                 tg3_write_mem(tp,
1737                               NIC_SRAM_FIRMWARE_MBOX,
1738                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1739         }
1740
1741         if (current_link_up != netif_carrier_ok(tp->dev)) {
1742                 if (current_link_up)
1743                         netif_carrier_on(tp->dev);
1744                 else
1745                         netif_carrier_off(tp->dev);
1746                 tg3_link_report(tp);
1747         }
1748
1749         return 0;
1750 }
1751
1752 struct tg3_fiber_aneginfo {
1753         int state;
1754 #define ANEG_STATE_UNKNOWN              0
1755 #define ANEG_STATE_AN_ENABLE            1
1756 #define ANEG_STATE_RESTART_INIT         2
1757 #define ANEG_STATE_RESTART              3
1758 #define ANEG_STATE_DISABLE_LINK_OK      4
1759 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1760 #define ANEG_STATE_ABILITY_DETECT       6
1761 #define ANEG_STATE_ACK_DETECT_INIT      7
1762 #define ANEG_STATE_ACK_DETECT           8
1763 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1764 #define ANEG_STATE_COMPLETE_ACK         10
1765 #define ANEG_STATE_IDLE_DETECT_INIT     11
1766 #define ANEG_STATE_IDLE_DETECT          12
1767 #define ANEG_STATE_LINK_OK              13
1768 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1769 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1770
1771         u32 flags;
1772 #define MR_AN_ENABLE            0x00000001
1773 #define MR_RESTART_AN           0x00000002
1774 #define MR_AN_COMPLETE          0x00000004
1775 #define MR_PAGE_RX              0x00000008
1776 #define MR_NP_LOADED            0x00000010
1777 #define MR_TOGGLE_TX            0x00000020
1778 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1779 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1780 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1781 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1782 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1783 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1784 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1785 #define MR_TOGGLE_RX            0x00002000
1786 #define MR_NP_RX                0x00004000
1787
1788 #define MR_LINK_OK              0x80000000
1789
1790         unsigned long link_time, cur_time;
1791
1792         u32 ability_match_cfg;
1793         int ability_match_count;
1794
1795         char ability_match, idle_match, ack_match;
1796
1797         u32 txconfig, rxconfig;
1798 #define ANEG_CFG_NP             0x00000080
1799 #define ANEG_CFG_ACK            0x00000040
1800 #define ANEG_CFG_RF2            0x00000020
1801 #define ANEG_CFG_RF1            0x00000010
1802 #define ANEG_CFG_PS2            0x00000001
1803 #define ANEG_CFG_PS1            0x00008000
1804 #define ANEG_CFG_HD             0x00004000
1805 #define ANEG_CFG_FD             0x00002000
1806 #define ANEG_CFG_INVAL          0x00001f06
1807
1808 };
1809 #define ANEG_OK         0
1810 #define ANEG_DONE       1
1811 #define ANEG_TIMER_ENAB 2
1812 #define ANEG_FAILED     -1
1813
1814 #define ANEG_STATE_SETTLE_TIME  10000
1815
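/* The state machine below is stepped by fiber_autoneg() roughly once per
 * microsecond, so ANEG_STATE_SETTLE_TIME corresponds to about 10 ms.
 */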
1816 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1817                                    struct tg3_fiber_aneginfo *ap)
1818 {
1819         unsigned long delta;
1820         u32 rx_cfg_reg;
1821         int ret;
1822
1823         if (ap->state == ANEG_STATE_UNKNOWN) {
1824                 ap->rxconfig = 0;
1825                 ap->link_time = 0;
1826                 ap->cur_time = 0;
1827                 ap->ability_match_cfg = 0;
1828                 ap->ability_match_count = 0;
1829                 ap->ability_match = 0;
1830                 ap->idle_match = 0;
1831                 ap->ack_match = 0;
1832         }
1833         ap->cur_time++;
1834
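        /* Sample the incoming config word (if any) and update the
         * ability/ack/idle match state consumed by the switch below.
         */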
1835         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1836                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1837
1838                 if (rx_cfg_reg != ap->ability_match_cfg) {
1839                         ap->ability_match_cfg = rx_cfg_reg;
1840                         ap->ability_match = 0;
1841                         ap->ability_match_count = 0;
1842                 } else {
1843                         if (++ap->ability_match_count > 1) {
1844                                 ap->ability_match = 1;
1845                                 ap->ability_match_cfg = rx_cfg_reg;
1846                         }
1847                 }
1848                 if (rx_cfg_reg & ANEG_CFG_ACK)
1849                         ap->ack_match = 1;
1850                 else
1851                         ap->ack_match = 0;
1852
1853                 ap->idle_match = 0;
1854         } else {
1855                 ap->idle_match = 1;
1856                 ap->ability_match_cfg = 0;
1857                 ap->ability_match_count = 0;
1858                 ap->ability_match = 0;
1859                 ap->ack_match = 0;
1860
1861                 rx_cfg_reg = 0;
1862         }
1863
1864         ap->rxconfig = rx_cfg_reg;
1865         ret = ANEG_OK;
1866
1867         switch(ap->state) {
1868         case ANEG_STATE_UNKNOWN:
1869                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1870                         ap->state = ANEG_STATE_AN_ENABLE;
1871
1872                 /* fallthru */
1873         case ANEG_STATE_AN_ENABLE:
1874                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1875                 if (ap->flags & MR_AN_ENABLE) {
1876                         ap->link_time = 0;
1877                         ap->cur_time = 0;
1878                         ap->ability_match_cfg = 0;
1879                         ap->ability_match_count = 0;
1880                         ap->ability_match = 0;
1881                         ap->idle_match = 0;
1882                         ap->ack_match = 0;
1883
1884                         ap->state = ANEG_STATE_RESTART_INIT;
1885                 } else {
1886                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1887                 }
1888                 break;
1889
1890         case ANEG_STATE_RESTART_INIT:
1891                 ap->link_time = ap->cur_time;
1892                 ap->flags &= ~(MR_NP_LOADED);
1893                 ap->txconfig = 0;
1894                 tw32(MAC_TX_AUTO_NEG, 0);
1895                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1896                 tw32_f(MAC_MODE, tp->mac_mode);
1897                 udelay(40);
1898
1899                 ret = ANEG_TIMER_ENAB;
1900                 ap->state = ANEG_STATE_RESTART;
1901
1902                 /* fallthru */
1903         case ANEG_STATE_RESTART:
1904                 delta = ap->cur_time - ap->link_time;
1905                 if (delta > ANEG_STATE_SETTLE_TIME) {
1906                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1907                 } else {
1908                         ret = ANEG_TIMER_ENAB;
1909                 }
1910                 break;
1911
1912         case ANEG_STATE_DISABLE_LINK_OK:
1913                 ret = ANEG_DONE;
1914                 break;
1915
1916         case ANEG_STATE_ABILITY_DETECT_INIT:
1917                 ap->flags &= ~(MR_TOGGLE_TX);
1918                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1919                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1920                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1921                 tw32_f(MAC_MODE, tp->mac_mode);
1922                 udelay(40);
1923
1924                 ap->state = ANEG_STATE_ABILITY_DETECT;
1925                 break;
1926
1927         case ANEG_STATE_ABILITY_DETECT:
1928                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1929                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1930                 }
1931                 break;
1932
1933         case ANEG_STATE_ACK_DETECT_INIT:
1934                 ap->txconfig |= ANEG_CFG_ACK;
1935                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1936                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1937                 tw32_f(MAC_MODE, tp->mac_mode);
1938                 udelay(40);
1939
1940                 ap->state = ANEG_STATE_ACK_DETECT;
1941
1942                 /* fallthru */
1943         case ANEG_STATE_ACK_DETECT:
1944                 if (ap->ack_match != 0) {
1945                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1946                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1947                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1948                         } else {
1949                                 ap->state = ANEG_STATE_AN_ENABLE;
1950                         }
1951                 } else if (ap->ability_match != 0 &&
1952                            ap->rxconfig == 0) {
1953                         ap->state = ANEG_STATE_AN_ENABLE;
1954                 }
1955                 break;
1956
1957         case ANEG_STATE_COMPLETE_ACK_INIT:
1958                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1959                         ret = ANEG_FAILED;
1960                         break;
1961                 }
1962                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1963                                MR_LP_ADV_HALF_DUPLEX |
1964                                MR_LP_ADV_SYM_PAUSE |
1965                                MR_LP_ADV_ASYM_PAUSE |
1966                                MR_LP_ADV_REMOTE_FAULT1 |
1967                                MR_LP_ADV_REMOTE_FAULT2 |
1968                                MR_LP_ADV_NEXT_PAGE |
1969                                MR_TOGGLE_RX |
1970                                MR_NP_RX);
1971                 if (ap->rxconfig & ANEG_CFG_FD)
1972                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1973                 if (ap->rxconfig & ANEG_CFG_HD)
1974                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1975                 if (ap->rxconfig & ANEG_CFG_PS1)
1976                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1977                 if (ap->rxconfig & ANEG_CFG_PS2)
1978                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1979                 if (ap->rxconfig & ANEG_CFG_RF1)
1980                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1981                 if (ap->rxconfig & ANEG_CFG_RF2)
1982                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1983                 if (ap->rxconfig & ANEG_CFG_NP)
1984                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1985
1986                 ap->link_time = ap->cur_time;
1987
1988                 ap->flags ^= (MR_TOGGLE_TX);
1989                 if (ap->rxconfig & 0x0008)
1990                         ap->flags |= MR_TOGGLE_RX;
1991                 if (ap->rxconfig & ANEG_CFG_NP)
1992                         ap->flags |= MR_NP_RX;
1993                 ap->flags |= MR_PAGE_RX;
1994
1995                 ap->state = ANEG_STATE_COMPLETE_ACK;
1996                 ret = ANEG_TIMER_ENAB;
1997                 break;
1998
1999         case ANEG_STATE_COMPLETE_ACK:
2000                 if (ap->ability_match != 0 &&
2001                     ap->rxconfig == 0) {
2002                         ap->state = ANEG_STATE_AN_ENABLE;
2003                         break;
2004                 }
2005                 delta = ap->cur_time - ap->link_time;
2006                 if (delta > ANEG_STATE_SETTLE_TIME) {
2007                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2008                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2009                         } else {
2010                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2011                                     !(ap->flags & MR_NP_RX)) {
2012                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2013                                 } else {
2014                                         ret = ANEG_FAILED;
2015                                 }
2016                         }
2017                 }
2018                 break;
2019
2020         case ANEG_STATE_IDLE_DETECT_INIT:
2021                 ap->link_time = ap->cur_time;
2022                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2023                 tw32_f(MAC_MODE, tp->mac_mode);
2024                 udelay(40);
2025
2026                 ap->state = ANEG_STATE_IDLE_DETECT;
2027                 ret = ANEG_TIMER_ENAB;
2028                 break;
2029
2030         case ANEG_STATE_IDLE_DETECT:
2031                 if (ap->ability_match != 0 &&
2032                     ap->rxconfig == 0) {
2033                         ap->state = ANEG_STATE_AN_ENABLE;
2034                         break;
2035                 }
2036                 delta = ap->cur_time - ap->link_time;
2037                 if (delta > ANEG_STATE_SETTLE_TIME) {
2038                         /* XXX another gem from the Broadcom driver :( */
2039                         ap->state = ANEG_STATE_LINK_OK;
2040                 }
2041                 break;
2042
2043         case ANEG_STATE_LINK_OK:
2044                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2045                 ret = ANEG_DONE;
2046                 break;
2047
2048         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2049                 /* ??? unimplemented */
2050                 break;
2051
2052         case ANEG_STATE_NEXT_PAGE_WAIT:
2053                 /* ??? unimplemented */
2054                 break;
2055
2056         default:
2057                 ret = ANEG_FAILED;
2058                 break;
2059         }
2060
2061         return ret;
2062 }
2063
2064 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2065 {
2066         int res = 0;
2067         struct tg3_fiber_aneginfo aninfo;
2068         int status = ANEG_FAILED;
2069         unsigned int tick;
2070         u32 tmp;
2071
2072         tw32_f(MAC_TX_AUTO_NEG, 0);
2073
2074         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2075         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2076         udelay(40);
2077
2078         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2079         udelay(40);
2080
2081         memset(&aninfo, 0, sizeof(aninfo));
2082         aninfo.flags |= MR_AN_ENABLE;
2083         aninfo.state = ANEG_STATE_UNKNOWN;
2084         aninfo.cur_time = 0;
2085         tick = 0;
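        /* Step the software autoneg state machine roughly once per
         * microsecond, for at most ~195 ms.
         */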
2086         while (++tick < 195000) {
2087                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2088                 if (status == ANEG_DONE || status == ANEG_FAILED)
2089                         break;
2090
2091                 udelay(1);
2092         }
2093
2094         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2095         tw32_f(MAC_MODE, tp->mac_mode);
2096         udelay(40);
2097
2098         *flags = aninfo.flags;
2099
2100         if (status == ANEG_DONE &&
2101             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2102                              MR_LP_ADV_FULL_DUPLEX)))
2103                 res = 1;
2104
2105         return res;
2106 }
2107
2108 static void tg3_init_bcm8002(struct tg3 *tp)
2109 {
2110         u32 mac_status = tr32(MAC_STATUS);
2111         int i;
2112
2113         /* Reset when initializing for the first time or when we have a link. */
2114         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2115             !(mac_status & MAC_STATUS_PCS_SYNCED))
2116                 return;
2117
2118         /* Set PLL lock range. */
2119         tg3_writephy(tp, 0x16, 0x8007);
2120
2121         /* SW reset */
2122         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2123
2124         /* Wait for reset to complete. */
2125         /* XXX schedule_timeout() ... */
2126         for (i = 0; i < 500; i++)
2127                 udelay(10);
2128
2129         /* Config mode; select PMA/Ch 1 regs. */
2130         tg3_writephy(tp, 0x10, 0x8411);
2131
2132         /* Enable auto-lock and comdet, select txclk for tx. */
2133         tg3_writephy(tp, 0x11, 0x0a10);
2134
2135         tg3_writephy(tp, 0x18, 0x00a0);
2136         tg3_writephy(tp, 0x16, 0x41ff);
2137
2138         /* Assert and deassert POR. */
2139         tg3_writephy(tp, 0x13, 0x0400);
2140         udelay(40);
2141         tg3_writephy(tp, 0x13, 0x0000);
2142
2143         tg3_writephy(tp, 0x11, 0x0a50);
2144         udelay(40);
2145         tg3_writephy(tp, 0x11, 0x0a10);
2146
2147         /* Wait for signal to stabilize */
2148         /* XXX schedule_timeout() ... */
2149         for (i = 0; i < 15000; i++)
2150                 udelay(10);
2151
2152         /* Deselect the channel register so we can read the PHYID
2153          * later.
2154          */
2155         tg3_writephy(tp, 0x10, 0x8011);
2156 }
2157
2158 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2159 {
2160         u32 sg_dig_ctrl, sg_dig_status;
2161         u32 serdes_cfg, expected_sg_dig_ctrl;
2162         int workaround, port_a;
2163         int current_link_up;
2164
2165         serdes_cfg = 0;
2166         expected_sg_dig_ctrl = 0;
2167         workaround = 0;
2168         port_a = 1;
2169         current_link_up = 0;
2170
2171         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2172             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2173                 workaround = 1;
2174                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2175                         port_a = 0;
2176
2177                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2178                 /* preserve bits 20-23 for voltage regulator */
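                /* (i.e. keep mask 0x00f06fff = 0x00000fff | 0x00006000 |
                 *  0x00f00000)
                 */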
2179                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2180         }
2181
2182         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2183
2184         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
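                /* Link mode is forced: if the SG_DIG block was left with
                 * autoneg enabled (bit 31 set), drop it back to the fixed
                 * configuration.
                 */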
2185                 if (sg_dig_ctrl & (1 << 31)) {
2186                         if (workaround) {
2187                                 u32 val = serdes_cfg;
2188
2189                                 if (port_a)
2190                                         val |= 0xc010000;
2191                                 else
2192                                         val |= 0x4010000;
2193                                 tw32_f(MAC_SERDES_CFG, val);
2194                         }
2195                         tw32_f(SG_DIG_CTRL, 0x01388400);
2196                 }
2197                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2198                         tg3_setup_flow_control(tp, 0, 0);
2199                         current_link_up = 1;
2200                 }
2201                 goto out;
2202         }
2203
2204         /* Want auto-negotiation.  */
2205         expected_sg_dig_ctrl = 0x81388400;
2206
2207         /* Pause capability */
2208         expected_sg_dig_ctrl |= (1 << 11);
2209
2210         /* Asymmetric pause */
2211         expected_sg_dig_ctrl |= (1 << 12);
2212
2213         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2214                 if (workaround)
2215                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2216                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2217                 udelay(5);
2218                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2219
2220                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2221         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2222                                  MAC_STATUS_SIGNAL_DET)) {
2223                 int i;
2224
2225                 /* Give it time to negotiate (~200ms) */
2226                 for (i = 0; i < 40000; i++) {
2227                         sg_dig_status = tr32(SG_DIG_STATUS);
2228                         if (sg_dig_status & (0x3))
2229                                 break;
2230                         udelay(5);
2231                 }
2232                 mac_status = tr32(MAC_STATUS);
2233
2234                 if ((sg_dig_status & (1 << 1)) &&
2235                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2236                         u32 local_adv, remote_adv;
2237
2238                         local_adv = ADVERTISE_PAUSE_CAP;
2239                         remote_adv = 0;
2240                         if (sg_dig_status & (1 << 19))
2241                                 remote_adv |= LPA_PAUSE_CAP;
2242                         if (sg_dig_status & (1 << 20))
2243                                 remote_adv |= LPA_PAUSE_ASYM;
2244
2245                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2246                         current_link_up = 1;
2247                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2248                 } else if (!(sg_dig_status & (1 << 1))) {
2249                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2250                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2251                         else {
2252                                 if (workaround) {
2253                                         u32 val = serdes_cfg;
2254
2255                                         if (port_a)
2256                                                 val |= 0xc010000;
2257                                         else
2258                                                 val |= 0x4010000;
2259
2260                                         tw32_f(MAC_SERDES_CFG, val);
2261                                 }
2262
2263                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2264                                 udelay(40);
2265
2266                                 /* Link parallel detection: the link is
2267                                  * up only if we have PCS_SYNC and are
2268                                  * not receiving config code words.  */
2269                                 mac_status = tr32(MAC_STATUS);
2270                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2271                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2272                                         tg3_setup_flow_control(tp, 0, 0);
2273                                         current_link_up = 1;
2274                                 }
2275                         }
2276                 }
2277         }
2278
2279 out:
2280         return current_link_up;
2281 }
2282
2283 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2284 {
2285         int current_link_up = 0;
2286
2287         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2288                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2289                 goto out;
2290         }
2291
2292         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2293                 u32 flags;
2294                 int i;
2295
2296                 if (fiber_autoneg(tp, &flags)) {
2297                         u32 local_adv, remote_adv;
2298
2299                         local_adv = ADVERTISE_PAUSE_CAP;
2300                         remote_adv = 0;
2301                         if (flags & MR_LP_ADV_SYM_PAUSE)
2302                                 remote_adv |= LPA_PAUSE_CAP;
2303                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2304                                 remote_adv |= LPA_PAUSE_ASYM;
2305
2306                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2307
2308                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2309                         current_link_up = 1;
2310                 }
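                /* Ack the sync/config-changed bits until the MAC status
                 * settles.
                 */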
2311                 for (i = 0; i < 30; i++) {
2312                         udelay(20);
2313                         tw32_f(MAC_STATUS,
2314                                (MAC_STATUS_SYNC_CHANGED |
2315                                 MAC_STATUS_CFG_CHANGED));
2316                         udelay(40);
2317                         if ((tr32(MAC_STATUS) &
2318                              (MAC_STATUS_SYNC_CHANGED |
2319                               MAC_STATUS_CFG_CHANGED)) == 0)
2320                                 break;
2321                 }
2322
2323                 mac_status = tr32(MAC_STATUS);
2324                 if (current_link_up == 0 &&
2325                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2326                     !(mac_status & MAC_STATUS_RCVD_CFG))
2327                         current_link_up = 1;
2328         } else {
2329                 /* Forcing 1000FD link up. */
2330                 current_link_up = 1;
2331                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2332
2333                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2334                 udelay(40);
2335         }
2336
2337 out:
2338         return current_link_up;
2339 }
2340
2341 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2342 {
2343         u32 orig_pause_cfg;
2344         u16 orig_active_speed;
2345         u8 orig_active_duplex;
2346         u32 mac_status;
2347         int current_link_up;
2348         int i;
2349
2350         orig_pause_cfg =
2351                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2352                                   TG3_FLAG_TX_PAUSE));
2353         orig_active_speed = tp->link_config.active_speed;
2354         orig_active_duplex = tp->link_config.active_duplex;
2355
2356         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2357             netif_carrier_ok(tp->dev) &&
2358             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
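                /* If the link is already up and nothing has changed, just
                 * ack the status bits and keep the current configuration.
                 */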
2359                 mac_status = tr32(MAC_STATUS);
2360                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2361                                MAC_STATUS_SIGNAL_DET |
2362                                MAC_STATUS_CFG_CHANGED |
2363                                MAC_STATUS_RCVD_CFG);
2364                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2365                                    MAC_STATUS_SIGNAL_DET)) {
2366                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2367                                             MAC_STATUS_CFG_CHANGED));
2368                         return 0;
2369                 }
2370         }
2371
2372         tw32_f(MAC_TX_AUTO_NEG, 0);
2373
2374         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2375         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2376         tw32_f(MAC_MODE, tp->mac_mode);
2377         udelay(40);
2378
2379         if (tp->phy_id == PHY_ID_BCM8002)
2380                 tg3_init_bcm8002(tp);
2381
2382         /* Enable link change event even when serdes polling.  */
2383         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2384         udelay(40);
2385
2386         current_link_up = 0;
2387         mac_status = tr32(MAC_STATUS);
2388
2389         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2390                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2391         else
2392                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2393
2394         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2395         tw32_f(MAC_MODE, tp->mac_mode);
2396         udelay(40);
2397
2398         tp->hw_status->status =
2399                 (SD_STATUS_UPDATED |
2400                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2401
2402         for (i = 0; i < 100; i++) {
2403                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2404                                     MAC_STATUS_CFG_CHANGED));
2405                 udelay(5);
2406                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2407                                          MAC_STATUS_CFG_CHANGED)) == 0)
2408                         break;
2409         }
2410
2411         mac_status = tr32(MAC_STATUS);
2412         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2413                 current_link_up = 0;
2414                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2415                         tw32_f(MAC_MODE, (tp->mac_mode |
2416                                           MAC_MODE_SEND_CONFIGS));
2417                         udelay(1);
2418                         tw32_f(MAC_MODE, tp->mac_mode);
2419                 }
2420         }
2421
2422         if (current_link_up == 1) {
2423                 tp->link_config.active_speed = SPEED_1000;
2424                 tp->link_config.active_duplex = DUPLEX_FULL;
2425                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2426                                     LED_CTRL_LNKLED_OVERRIDE |
2427                                     LED_CTRL_1000MBPS_ON));
2428         } else {
2429                 tp->link_config.active_speed = SPEED_INVALID;
2430                 tp->link_config.active_duplex = DUPLEX_INVALID;
2431                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2432                                     LED_CTRL_LNKLED_OVERRIDE |
2433                                     LED_CTRL_TRAFFIC_OVERRIDE));
2434         }
2435
2436         if (current_link_up != netif_carrier_ok(tp->dev)) {
2437                 if (current_link_up)
2438                         netif_carrier_on(tp->dev);
2439                 else
2440                         netif_carrier_off(tp->dev);
2441                 tg3_link_report(tp);
2442         } else {
2443                 u32 now_pause_cfg =
2444                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2445                                          TG3_FLAG_TX_PAUSE);
2446                 if (orig_pause_cfg != now_pause_cfg ||
2447                     orig_active_speed != tp->link_config.active_speed ||
2448                     orig_active_duplex != tp->link_config.active_duplex)
2449                         tg3_link_report(tp);
2450         }
2451
2452         return 0;
2453 }
2454
2455 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2456 {
2457         int err;
2458
2459         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2460                 err = tg3_setup_fiber_phy(tp, force_reset);
2461         } else {
2462                 err = tg3_setup_copper_phy(tp, force_reset);
2463         }
2464
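        /* Half-duplex gigabit requires a longer slot time (carrier
         * extension), so use the extended value there and the standard
         * one everywhere else.
         */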
2465         if (tp->link_config.active_speed == SPEED_1000 &&
2466             tp->link_config.active_duplex == DUPLEX_HALF)
2467                 tw32(MAC_TX_LENGTHS,
2468                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2469                       (6 << TX_LENGTHS_IPG_SHIFT) |
2470                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2471         else
2472                 tw32(MAC_TX_LENGTHS,
2473                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2474                       (6 << TX_LENGTHS_IPG_SHIFT) |
2475                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2476
2477         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2478                 if (netif_carrier_ok(tp->dev)) {
2479                         tw32(HOSTCC_STAT_COAL_TICKS,
2480                              DEFAULT_STAT_COAL_TICKS);
2481                 } else {
2482                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2483                 }
2484         }
2485
2486         return err;
2487 }
2488
2489 /* Tigon3 never reports partial packet sends.  So we do not
2490  * need special logic to handle SKBs that have not had all
2491  * of their frags sent yet, like SunGEM does.
2492  */
2493 static void tg3_tx(struct tg3 *tp)
2494 {
2495         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2496         u32 sw_idx = tp->tx_cons;
2497
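        /* Walk from the driver's consumer index up to the index the
         * hardware reports as completed, unmapping and freeing each skb.
         */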
2498         while (sw_idx != hw_idx) {
2499                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2500                 struct sk_buff *skb = ri->skb;
2501                 int i;
2502
2503                 if (unlikely(skb == NULL))
2504                         BUG();
2505
2506                 pci_unmap_single(tp->pdev,
2507                                  pci_unmap_addr(ri, mapping),
2508                                  skb_headlen(skb),
2509                                  PCI_DMA_TODEVICE);
2510
2511                 ri->skb = NULL;
2512
2513                 sw_idx = NEXT_TX(sw_idx);
2514
2515                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2516                         if (unlikely(sw_idx == hw_idx))
2517                                 BUG();
2518
2519                         ri = &tp->tx_buffers[sw_idx];
2520                         if (unlikely(ri->skb != NULL))
2521                                 BUG();
2522
2523                         pci_unmap_page(tp->pdev,
2524                                        pci_unmap_addr(ri, mapping),
2525                                        skb_shinfo(skb)->frags[i].size,
2526                                        PCI_DMA_TODEVICE);
2527
2528                         sw_idx = NEXT_TX(sw_idx);
2529                 }
2530
2531                 dev_kfree_skb_irq(skb);
2532         }
2533
2534         tp->tx_cons = sw_idx;
2535
2536         if (netif_queue_stopped(tp->dev) &&
2537             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2538                 netif_wake_queue(tp->dev);
2539 }
2540
2541 /* Returns size of skb allocated or < 0 on error.
2542  *
2543  * We only need to fill in the address because the other members
2544  * of the RX descriptor are invariant, see tg3_init_rings.
2545  *
2546  * Note the purposeful asymmetry of cpu vs. chip accesses.  When
2547  * posting buffers we only dirty the first cache line of the RX
2548  * descriptor (containing the address), whereas for the RX status
2549  * buffers the cpu only reads the last cacheline of the RX descriptor
2550  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2551  */
2552 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2553                             int src_idx, u32 dest_idx_unmasked)
2554 {
2555         struct tg3_rx_buffer_desc *desc;
2556         struct ring_info *map, *src_map;
2557         struct sk_buff *skb;
2558         dma_addr_t mapping;
2559         int skb_size, dest_idx;
2560
2561         src_map = NULL;
2562         switch (opaque_key) {
2563         case RXD_OPAQUE_RING_STD:
2564                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2565                 desc = &tp->rx_std[dest_idx];
2566                 map = &tp->rx_std_buffers[dest_idx];
2567                 if (src_idx >= 0)
2568                         src_map = &tp->rx_std_buffers[src_idx];
2569                 skb_size = RX_PKT_BUF_SZ;
2570                 break;
2571
2572         case RXD_OPAQUE_RING_JUMBO:
2573                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2574                 desc = &tp->rx_jumbo[dest_idx];
2575                 map = &tp->rx_jumbo_buffers[dest_idx];
2576                 if (src_idx >= 0)
2577                         src_map = &tp->rx_jumbo_buffers[src_idx];
2578                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2579                 break;
2580
2581         default:
2582                 return -EINVAL;
2583         }
2584
2585         /* Do not overwrite any of the map or rp information
2586          * until we are sure we can commit to a new buffer.
2587          *
2588          * Callers depend upon this behavior and assume that
2589          * we leave everything unchanged if we fail.
2590          */
2591         skb = dev_alloc_skb(skb_size);
2592         if (skb == NULL)
2593                 return -ENOMEM;
2594
2595         skb->dev = tp->dev;
2596         skb_reserve(skb, tp->rx_offset);
2597
2598         mapping = pci_map_single(tp->pdev, skb->data,
2599                                  skb_size - tp->rx_offset,
2600                                  PCI_DMA_FROMDEVICE);
2601
2602         map->skb = skb;
2603         pci_unmap_addr_set(map, mapping, mapping);
2604
2605         if (src_map != NULL)
2606                 src_map->skb = NULL;
2607
2608         desc->addr_hi = ((u64)mapping >> 32);
2609         desc->addr_lo = ((u64)mapping & 0xffffffff);
2610
2611         return skb_size;
2612 }
2613
2614 /* We only need to copy over the address because the other
2615  * members of the RX descriptor are invariant.  See notes above
2616  * tg3_alloc_rx_skb for full details.
2617  */
2618 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2619                            int src_idx, u32 dest_idx_unmasked)
2620 {
2621         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2622         struct ring_info *src_map, *dest_map;
2623         int dest_idx;
2624
2625         switch (opaque_key) {
2626         case RXD_OPAQUE_RING_STD:
2627                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2628                 dest_desc = &tp->rx_std[dest_idx];
2629                 dest_map = &tp->rx_std_buffers[dest_idx];
2630                 src_desc = &tp->rx_std[src_idx];
2631                 src_map = &tp->rx_std_buffers[src_idx];
2632                 break;
2633
2634         case RXD_OPAQUE_RING_JUMBO:
2635                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2636                 dest_desc = &tp->rx_jumbo[dest_idx];
2637                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2638                 src_desc = &tp->rx_jumbo[src_idx];
2639                 src_map = &tp->rx_jumbo_buffers[src_idx];
2640                 break;
2641
2642         default:
2643                 return;
2644         }
2645
2646         dest_map->skb = src_map->skb;
2647         pci_unmap_addr_set(dest_map, mapping,
2648                            pci_unmap_addr(src_map, mapping));
2649         dest_desc->addr_hi = src_desc->addr_hi;
2650         dest_desc->addr_lo = src_desc->addr_lo;
2651
2652         src_map->skb = NULL;
2653 }
2654
2655 #if TG3_VLAN_TAG_USED
2656 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2657 {
2658         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2659 }
2660 #endif
2661
2662 /* The RX ring scheme is composed of multiple rings which post fresh
2663  * buffers to the chip, and one special ring the chip uses to report
2664  * status back to the host.
2665  *
2666  * The special ring reports the status of received packets to the
2667  * host.  The chip does not write into the original descriptor the
2668  * RX buffer was obtained from.  The chip simply takes the original
2669  * descriptor as provided by the host, updates the status and length
2670  * field, then writes this into the next status ring entry.
2671  *
2672  * Each ring the host uses to post buffers to the chip is described
2673  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2674  * it is first placed into the on-chip RAM.  Once the packet's length
2675  * is known, the chip walks down the TG3_BDINFO entries to select a ring:
2676  * each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2677  * whose MAXLEN covers the new packet's length is chosen.
2678  *
2679  * The "separate ring for rx status" scheme may sound queer, but it makes
2680  * sense from a cache coherency perspective.  If only the host writes
2681  * to the buffer post rings, and only the chip writes to the rx status
2682  * rings, then cache lines never move beyond shared-modified state.
2683  * If both the host and chip were to write into the same ring, cache line
2684  * eviction could occur since both entities want it in an exclusive state.
2685  */
2686 static int tg3_rx(struct tg3 *tp, int budget)
2687 {
2688         u32 work_mask;
2689         u32 sw_idx = tp->rx_rcb_ptr;
2690         u16 hw_idx;
2691         int received;
2692
2693         hw_idx = tp->hw_status->idx[0].rx_producer;
2694         /*
2695          * We need to order the read of hw_idx and the read of
2696          * the opaque cookie.
2697          */
2698         rmb();
2699         work_mask = 0;
2700         received = 0;
2701         while (sw_idx != hw_idx && budget > 0) {
2702                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2703                 unsigned int len;
2704                 struct sk_buff *skb;
2705                 dma_addr_t dma_addr;
2706                 u32 opaque_key, desc_idx, *post_ptr;
2707
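                /* The opaque cookie tells us which producer ring (std or
                 * jumbo) this buffer came from, and its index there.
                 */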
2708                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2709                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2710                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2711                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2712                                                   mapping);
2713                         skb = tp->rx_std_buffers[desc_idx].skb;
2714                         post_ptr = &tp->rx_std_ptr;
2715                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2716                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2717                                                   mapping);
2718                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2719                         post_ptr = &tp->rx_jumbo_ptr;
2720                 }
2721                 else {
2722                         goto next_pkt_nopost;
2723                 }
2724
2725                 work_mask |= opaque_key;
2726
2727                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2728                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2729                 drop_it:
2730                         tg3_recycle_rx(tp, opaque_key,
2731                                        desc_idx, *post_ptr);
2732                 drop_it_no_recycle:
2733                         /* Other statistics kept track of by card. */
2734                         tp->net_stats.rx_dropped++;
2735                         goto next_pkt;
2736                 }
2737
2738                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2739
2740                 /* rx_offset != 2 iff this is a 5701 card running
2741                  * in PCI-X mode [see tg3_get_invariants()].
2742                  */
2743                 if (len > RX_COPY_THRESHOLD &&
2744                     tp->rx_offset == 2) {
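                        /* Packet is large enough: hand the existing buffer
                         * up the stack and post a newly allocated
                         * replacement in its place.
                         */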
2745                         int skb_size;
2746
2747                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2748                                                     desc_idx, *post_ptr);
2749                         if (skb_size < 0)
2750                                 goto drop_it;
2751
2752                         pci_unmap_single(tp->pdev, dma_addr,
2753                                          skb_size - tp->rx_offset,
2754                                          PCI_DMA_FROMDEVICE);
2755
2756                         skb_put(skb, len);
2757                 } else {
2758                         struct sk_buff *copy_skb;
2759
2760                         tg3_recycle_rx(tp, opaque_key,
2761                                        desc_idx, *post_ptr);
2762
2763                         copy_skb = dev_alloc_skb(len + 2);
2764                         if (copy_skb == NULL)
2765                                 goto drop_it_no_recycle;
2766
2767                         copy_skb->dev = tp->dev;
2768                         skb_reserve(copy_skb, 2);
2769                         skb_put(copy_skb, len);
2770                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2771                         memcpy(copy_skb->data, skb->data, len);
2772                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2773
2774                         /* We'll reuse the original ring buffer. */
2775                         skb = copy_skb;
2776                 }
2777
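                /* The chip reports the computed TCP/UDP checksum in the
                 * descriptor; a value of 0xffff indicates the packet checked
                 * out, so the stack can skip verifying it again.
                 */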
2778                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2779                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2780                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2781                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2782                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2783                 else
2784                         skb->ip_summed = CHECKSUM_NONE;
2785
2786                 skb->protocol = eth_type_trans(skb, tp->dev);
2787 #if TG3_VLAN_TAG_USED
2788                 if (tp->vlgrp != NULL &&
2789                     desc->type_flags & RXD_FLAG_VLAN) {
2790                         tg3_vlan_rx(tp, skb,
2791                                     desc->err_vlan & RXD_VLAN_MASK);
2792                 } else
2793 #endif
2794                         netif_receive_skb(skb);
2795
2796                 tp->dev->last_rx = jiffies;
2797                 received++;
2798                 budget--;
2799
2800 next_pkt:
2801                 (*post_ptr)++;
2802 next_pkt_nopost:
2803                 sw_idx++;
2804                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
2805         }
2806
2807         /* ACK the status ring. */
2808         tp->rx_rcb_ptr = sw_idx;
2809         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
2810
2811         /* Refill RX ring(s). */
2812         if (work_mask & RXD_OPAQUE_RING_STD) {
2813                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2814                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2815                              sw_idx);
2816         }
2817         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2818                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2819                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2820                              sw_idx);
2821         }
2822         mmiowb();
2823
2824         return received;
2825 }
2826
2827 static int tg3_poll(struct net_device *netdev, int *budget)
2828 {
2829         struct tg3 *tp = netdev_priv(netdev);
2830         struct tg3_hw_status *sblk = tp->hw_status;
2831         unsigned long flags;
2832         int done;
2833
2834         spin_lock_irqsave(&tp->lock, flags);
2835
2836         /* handle link change and other phy events */
2837         if (!(tp->tg3_flags &
2838               (TG3_FLAG_USE_LINKCHG_REG |
2839                TG3_FLAG_POLL_SERDES))) {
2840                 if (sblk->status & SD_STATUS_LINK_CHG) {
2841                         sblk->status = SD_STATUS_UPDATED |
2842                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2843                         tg3_setup_phy(tp, 0);
2844                 }
2845         }
2846
2847         /* run TX completion thread */
2848         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2849                 spin_lock(&tp->tx_lock);
2850                 tg3_tx(tp);
2851                 spin_unlock(&tp->tx_lock);
2852         }
2853
2854         spin_unlock_irqrestore(&tp->lock, flags);
2855
2856         /* run RX thread, within the bounds set by NAPI.
2857          * All RX "locking" is done by ensuring outside
2858          * code synchronizes with dev->poll()
2859          */
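        /* tg3_rx() is bounded by both the global *budget and this device's
         * quota; if we used the whole allotment, leave done == 0 so we stay
         * on the poll list and pick up the rest on the next pass.
         */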
2860         done = 1;
2861         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2862                 int orig_budget = *budget;
2863                 int work_done;
2864
2865                 if (orig_budget > netdev->quota)
2866                         orig_budget = netdev->quota;
2867
2868                 work_done = tg3_rx(tp, orig_budget);
2869
2870                 *budget -= work_done;
2871                 netdev->quota -= work_done;
2872
2873                 if (work_done >= orig_budget)
2874                         done = 0;
2875         }
2876
2877         /* if no more work, tell net stack and NIC we're done */
2878         if (done) {
2879                 spin_lock_irqsave(&tp->lock, flags);
2880                 __netif_rx_complete(netdev);
2881                 tg3_restart_ints(tp);
2882                 spin_unlock_irqrestore(&tp->lock, flags);
2883         }
2884
2885         return (done ? 0 : 1);
2886 }
2887
2888 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2889 {
2890         struct tg3_hw_status *sblk = tp->hw_status;
2891         unsigned int work_exists = 0;
2892
2893         /* check for phy events */
2894         if (!(tp->tg3_flags &
2895               (TG3_FLAG_USE_LINKCHG_REG |
2896                TG3_FLAG_POLL_SERDES))) {
2897                 if (sblk->status & SD_STATUS_LINK_CHG)
2898                         work_exists = 1;
2899         }
2900         /* check for RX/TX work to do */
2901         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2902             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2903                 work_exists = 1;
2904
2905         return work_exists;
2906 }
2907
2908 /* MSI ISR - No need to check for interrupt sharing and no need to
2909  * flush status block and interrupt mailbox. PCI ordering rules
2910  * guarantee that MSI will arrive after the status block.
2911  */
2912 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2913 {
2914         struct net_device *dev = dev_id;
2915         struct tg3 *tp = netdev_priv(dev);
2916         struct tg3_hw_status *sblk = tp->hw_status;
2917         unsigned long flags;
2918
2919         spin_lock_irqsave(&tp->lock, flags);
2920
2921         /*
2922          * writing any value to intr-mbox-0 clears PCI INTA# and
2923          * chip-internal interrupt pending events.
2924          * writing non-zero to intr-mbox-0 additionally tells the
2925          * NIC to stop sending us irqs, engaging "in-intr-handler"
2926          * event coalescing.
2927          */
2928         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
2929         sblk->status &= ~SD_STATUS_UPDATED;
2930
2931         if (likely(tg3_has_work(dev, tp)))
2932                 netif_rx_schedule(dev);         /* schedule NAPI poll */
2933         else {
2934                 /* no work, re-enable interrupts
2935                  */
2936                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2937                              0x00000000);
2938         }
2939
2940         spin_unlock_irqrestore(&tp->lock, flags);
2941
2942         return IRQ_RETVAL(1);
2943 }
2944
2945 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2946 {
2947         struct net_device *dev = dev_id;
2948         struct tg3 *tp = netdev_priv(dev);
2949         struct tg3_hw_status *sblk = tp->hw_status;
2950         unsigned long flags;
2951         unsigned int handled = 1;
2952
2953         spin_lock_irqsave(&tp->lock, flags);
2954
2955         /* In INTx mode, the interrupt can arrive at the CPU before the
2956          * status block write posted just prior to it is visible in memory.
2957          * Reading the PCI State register will confirm whether the
2958          * interrupt is ours and will flush the status block.
2959          */
2960         if ((sblk->status & SD_STATUS_UPDATED) ||
2961             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2962                 /*
2963                  * writing any value to intr-mbox-0 clears PCI INTA# and
2964                  * chip-internal interrupt pending events.
2965          * writing non-zero to intr-mbox-0 additionally tells the
2966                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2967                  * event coalescing.
2968                  */
2969                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2970                              0x00000001);
2971                 /*
2972                  * Flush PCI write.  This also guarantees that our
2973                  * status block has been flushed to host memory.
2974                  */
2975                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2976                 sblk->status &= ~SD_STATUS_UPDATED;
2977
2978                 if (likely(tg3_has_work(dev, tp)))
2979                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2980                 else {
2981                         /* no work, shared interrupt perhaps?  re-enable
2982                          * interrupts, and flush that PCI write
2983                          */
2984                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2985                                 0x00000000);
2986                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2987                 }
2988         } else {        /* shared interrupt */
2989                 handled = 0;
2990         }
2991
2992         spin_unlock_irqrestore(&tp->lock, flags);
2993
2994         return IRQ_RETVAL(handled);
2995 }
2996
2997 /* ISR for interrupt test */
2998 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
2999                 struct pt_regs *regs)
3000 {
3001         struct net_device *dev = dev_id;
3002         struct tg3 *tp = netdev_priv(dev);
3003         struct tg3_hw_status *sblk = tp->hw_status;
3004
3005         if (sblk->status & SD_STATUS_UPDATED) {
3006                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3007                              0x00000001);
3008                 return IRQ_RETVAL(1);
3009         }
3010         return IRQ_RETVAL(0);
3011 }
3012
3013 static int tg3_init_hw(struct tg3 *);
3014 static int tg3_halt(struct tg3 *);
3015
3016 #ifdef CONFIG_NET_POLL_CONTROLLER
3017 static void tg3_poll_controller(struct net_device *dev)
3018 {
3019         struct tg3 *tp = netdev_priv(dev);
3020
3021         tg3_interrupt(tp->pdev->irq, dev, NULL);
3022 }
3023 #endif
3024
3025 static void tg3_reset_task(void *_data)
3026 {
3027         struct tg3 *tp = _data;
3028         unsigned int restart_timer;
3029
3030         tg3_netif_stop(tp);
3031
3032         spin_lock_irq(&tp->lock);
3033         spin_lock(&tp->tx_lock);
3034
3035         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3036         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3037
3038         tg3_halt(tp);
3039         tg3_init_hw(tp);
3040
3041         tg3_netif_start(tp);
3042
3043         spin_unlock(&tp->tx_lock);
3044         spin_unlock_irq(&tp->lock);
3045
3046         if (restart_timer)
3047                 mod_timer(&tp->timer, jiffies + 1);
3048 }
3049
3050 static void tg3_tx_timeout(struct net_device *dev)
3051 {
3052         struct tg3 *tp = netdev_priv(dev);
3053
3054         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3055                dev->name);
3056
3057         schedule_work(&tp->reset_task);
3058 }
3059
3060 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3061
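/* Workaround for the 4GB-boundary DMA bug (see tg3_4g_overflow_test()
 * below): copy the whole skb into a single freshly mapped linear buffer,
 * describe it with one TX descriptor at *start, then unmap and clear the
 * software ring entries that were already filled in for the original
 * skb's head and fragments.
 */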
3062 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3063                                        u32 guilty_entry, int guilty_len,
3064                                        u32 last_plus_one, u32 *start, u32 mss)
3065 {
3066         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3067         dma_addr_t new_addr;
3068         u32 entry = *start;
3069         int i;
3070
3071         if (!new_skb) {
3072                 dev_kfree_skb(skb);
3073                 return -1;
3074         }
3075
3076         /* New SKB is guaranteed to be linear. */
3077         entry = *start;
3078         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3079                                   PCI_DMA_TODEVICE);
3080         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3081                     (skb->ip_summed == CHECKSUM_HW) ?
3082                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3083         *start = NEXT_TX(entry);
3084
3085         /* Now clean up the sw ring entries. */
3086         i = 0;
3087         while (entry != last_plus_one) {
3088                 int len;
3089
3090                 if (i == 0)
3091                         len = skb_headlen(skb);
3092                 else
3093                         len = skb_shinfo(skb)->frags[i-1].size;
3094                 pci_unmap_single(tp->pdev,
3095                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3096                                  len, PCI_DMA_TODEVICE);
3097                 if (i == 0) {
3098                         tp->tx_buffers[entry].skb = new_skb;
3099                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3100                 } else {
3101                         tp->tx_buffers[entry].skb = NULL;
3102                 }
3103                 entry = NEXT_TX(entry);
3104                 i++;
3105         }
3106
3107         dev_kfree_skb(skb);
3108
3109         return 0;
3110 }
3111
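/* Fill in one hardware TX descriptor.  mss_and_is_end packs two values:
 * bit 0 marks the last descriptor of a packet (TXD_FLAG_END) and the
 * remaining bits carry the TSO MSS, which ends up in the same descriptor
 * word as the VLAN tag.  A VLAN tag, when present, rides in the upper 16
 * bits of 'flags' and is split back out here.
 */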
3112 static void tg3_set_txd(struct tg3 *tp, int entry,
3113                         dma_addr_t mapping, int len, u32 flags,
3114                         u32 mss_and_is_end)
3115 {
3116         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3117         int is_end = (mss_and_is_end & 0x1);
3118         u32 mss = (mss_and_is_end >> 1);
3119         u32 vlan_tag = 0;
3120
3121         if (is_end)
3122                 flags |= TXD_FLAG_END;
3123         if (flags & TXD_FLAG_VLAN) {
3124                 vlan_tag = flags >> 16;
3125                 flags &= 0xffff;
3126         }
3127         vlan_tag |= (mss << TXD_MSS_SHIFT);
3128
3129         txd->addr_hi = ((u64) mapping >> 32);
3130         txd->addr_lo = ((u64) mapping & 0xffffffff);
3131         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3132         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3133 }
3134
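/* Some tg3 chips apparently cannot DMA a buffer that crosses a 4GB
 * boundary (hence tigon3_4gb_hwbug_workaround() above).  The test below
 * flags a mapping whose low 32 bits would wrap within len plus a small
 * margin; the 0xffffdcc0 comparison simply skips mappings that start
 * well below the boundary.
 */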
3135 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3136 {
3137         u32 base = (u32) mapping & 0xffffffff;
3138
3139         return ((base > 0xffffdcc0) &&
3140                 (base + len + 8 < base));
3141 }
3142
3143 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3144 {
3145         struct tg3 *tp = netdev_priv(dev);
3146         dma_addr_t mapping;
3147         unsigned int i;
3148         u32 len, entry, base_flags, mss;
3149         int would_hit_hwbug;
3150         unsigned long flags;
3151
3152         len = skb_headlen(skb);
3153
3154         /* No BH disabling for tx_lock here.  We are running in BH disabled
3155          * context and TX reclaim runs via tp->poll inside of a software
3156          * interrupt.  Rejoice!
3157          *
3158          * Actually, things are not so simple.  If we are to take a hw
3159          * IRQ here, we can deadlock, consider:
3160          *
3161          *       CPU1           CPU2
3162          *   tg3_start_xmit
3163          *   take tp->tx_lock
3164          *                      tg3_timer
3165          *                      take tp->lock
3166          *   tg3_interrupt
3167          *   spin on tp->lock
3168          *                      spin on tp->tx_lock
3169          *
3170          * So we really do need to disable interrupts when taking
3171          * tx_lock here.
3172          */
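        /* If the lock is busy we return NETDEV_TX_LOCKED below, which asks
         * the queueing layer to requeue the packet and try again shortly.
         */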
3173         local_irq_save(flags);
3174         if (!spin_trylock(&tp->tx_lock)) { 
3175                 local_irq_restore(flags);
3176                 return NETDEV_TX_LOCKED; 
3177         } 
3178
3179         /* This is a hard error, log it. */
3180         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3181                 netif_stop_queue(dev);
3182                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3183                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3184                        dev->name);
3185                 return NETDEV_TX_BUSY;
3186         }
3187
3188         entry = tp->tx_prod;
3189         base_flags = 0;
3190         if (skb->ip_summed == CHECKSUM_HW)
3191                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3192 #if TG3_TSO_SUPPORT != 0
3193         mss = 0;
3194         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3195             (mss = skb_shinfo(skb)->tso_size) != 0) {
3196                 int tcp_opt_len, ip_tcp_len;
3197
3198                 if (skb_header_cloned(skb) &&
3199                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3200                         dev_kfree_skb(skb);
3201                         goto out_unlock;
3202                 }
3203
3204                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3205                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3206
3207                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3208                                TXD_FLAG_CPU_POST_DMA);
3209
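                /* Prepare the headers for segmentation: zero the IP checksum
                 * and set tot_len to the per-segment size.  Chips with full
                 * HW TSO compute the TCP checksum themselves; otherwise seed
                 * th->check with the pseudo-header sum (length 0) so the
                 * hardware can finish it for each segment.
                 */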
3210                 skb->nh.iph->check = 0;
3211                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3212                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3213                         skb->h.th->check = 0;
3214                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3215                 } else {
3217                         skb->h.th->check =
3218                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3219                                                    skb->nh.iph->daddr,
3220                                                    0, IPPROTO_TCP, 0);
3221                 }
3222
3223                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3224                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3225                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3226                                 int tsflags;
3227
3228                                 tsflags = ((skb->nh.iph->ihl - 5) +
3229                                            (tcp_opt_len >> 2));
3230                                 mss |= (tsflags << 11);
3231                         }
3232                 } else {
3233                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3234                                 int tsflags;
3235
3236                                 tsflags = ((skb->nh.iph->ihl - 5) +
3237                                            (tcp_opt_len >> 2));
3238                                 base_flags |= tsflags << 12;
3239                         }
3240                 }
3241         }
3242 #else
3243         mss = 0;
3244 #endif
3245 #if TG3_VLAN_TAG_USED
3246         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3247                 base_flags |= (TXD_FLAG_VLAN |
3248                                (vlan_tx_tag_get(skb) << 16));
3249 #endif
3250
3251         /* Queue skb data, a.k.a. the main skb fragment. */
3252         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3253
3254         tp->tx_buffers[entry].skb = skb;
3255         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3256
3257         would_hit_hwbug = 0;
3258
3259         if (tg3_4g_overflow_test(mapping, len))
3260                 would_hit_hwbug = entry + 1;
3261
3262         tg3_set_txd(tp, entry, mapping, len, base_flags,
3263                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3264
3265         entry = NEXT_TX(entry);
3266
3267         /* Now loop through additional data fragments, and queue them. */
3268         if (skb_shinfo(skb)->nr_frags > 0) {
3269                 unsigned int i, last;
3270
3271                 last = skb_shinfo(skb)->nr_frags - 1;
3272                 for (i = 0; i <= last; i++) {
3273                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3274
3275                         len = frag->size;
3276                         mapping = pci_map_page(tp->pdev,
3277                                                frag->page,
3278                                                frag->page_offset,
3279                                                len, PCI_DMA_TODEVICE);
3280
3281                         tp->tx_buffers[entry].skb = NULL;
3282                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3283
3284                         if (tg3_4g_overflow_test(mapping, len)) {
3285                                 /* Only one should match. */
3286                                 if (would_hit_hwbug)
3287                                         BUG();
3288                                 would_hit_hwbug = entry + 1;
3289                         }
3290
3291                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3292                                 tg3_set_txd(tp, entry, mapping, len,
3293                                             base_flags, (i == last)|(mss << 1));
3294                         else
3295                                 tg3_set_txd(tp, entry, mapping, len,
3296                                             base_flags, (i == last));
3297
3298                         entry = NEXT_TX(entry);
3299                 }
3300         }
3301
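        /* If any buffer of this packet would cross the 4GB boundary, walk
         * back to the packet's first descriptor, work out the guilty
         * buffer's length, and let tigon3_4gb_hwbug_workaround() rebuild
         * the packet as a single linear copy.
         */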
3302         if (would_hit_hwbug) {
3303                 u32 last_plus_one = entry;
3304                 u32 start;
3305                 unsigned int len = 0;
3306
3307                 would_hit_hwbug -= 1;
3308                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3309                 entry &= (TG3_TX_RING_SIZE - 1);
3310                 start = entry;
3311                 i = 0;
3312                 while (entry != last_plus_one) {
3313                         if (i == 0)
3314                                 len = skb_headlen(skb);
3315                         else
3316                                 len = skb_shinfo(skb)->frags[i-1].size;
3317
3318                         if (entry == would_hit_hwbug)
3319                                 break;
3320
3321                         i++;
3322                         entry = NEXT_TX(entry);
3323
3324                 }
3325
3326                 /* If the workaround fails due to memory/mapping
3327                  * failure, silently drop this packet.
3328                  */
3329                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3330                                                 entry, len,
3331                                                 last_plus_one,
3332                                                 &start, mss))
3333                         goto out_unlock;
3334
3335                 entry = start;
3336         }
3337
3338         /* Packets are ready, update Tx producer idx locally and on the NIC. */
3339         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3340
3341         tp->tx_prod = entry;
3342         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3343                 netif_stop_queue(dev);
3344
3345 out_unlock:
3346         mmiowb();
3347         spin_unlock_irqrestore(&tp->tx_lock, flags);
3348
3349         dev->trans_start = jiffies;
3350
3351         return NETDEV_TX_OK;
3352 }
3353
3354 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3355                                int new_mtu)
3356 {
3357         dev->mtu = new_mtu;
3358
3359         if (new_mtu > ETH_DATA_LEN)
3360                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3361         else
3362                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3363 }
3364
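/* Changing the MTU on a running interface requires a full halt and
 * re-initialization below, since whether the jumbo RX ring is used
 * depends on the new MTU via TG3_FLAG_JUMBO_ENABLE.
 */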
3365 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3366 {
3367         struct tg3 *tp = netdev_priv(dev);
3368
3369         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3370                 return -EINVAL;
3371
3372         if (!netif_running(dev)) {
3373                 /* We'll just catch it later when the
3374                  * device is brought up.
3375                  */
3376                 tg3_set_mtu(dev, tp, new_mtu);
3377                 return 0;
3378         }
3379
3380         tg3_netif_stop(tp);
3381         spin_lock_irq(&tp->lock);
3382         spin_lock(&tp->tx_lock);
3383
3384         tg3_halt(tp);
3385
3386         tg3_set_mtu(dev, tp, new_mtu);
3387
3388         tg3_init_hw(tp);
3389
3390         tg3_netif_start(tp);
3391
3392         spin_unlock(&tp->tx_lock);
3393         spin_unlock_irq(&tp->lock);
3394
3395         return 0;
3396 }
3397
3398 /* Free up pending packets in all rx/tx rings.
3399  *
3400  * The chip has been shut down and the driver detached from
3401  * the networking core, so no interrupts or new tx packets will
3402  * end up in the driver.  tp->{tx,}lock is not held and we are not
3403  * in an interrupt context and thus may sleep.
3404  */
3405 static void tg3_free_rings(struct tg3 *tp)
3406 {
3407         struct ring_info *rxp;
3408         int i;
3409
3410         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3411                 rxp = &tp->rx_std_buffers[i];
3412
3413                 if (rxp->skb == NULL)
3414                         continue;
3415                 pci_unmap_single(tp->pdev,
3416                                  pci_unmap_addr(rxp, mapping),
3417                                  RX_PKT_BUF_SZ - tp->rx_offset,
3418                                  PCI_DMA_FROMDEVICE);
3419                 dev_kfree_skb_any(rxp->skb);
3420                 rxp->skb = NULL;
3421         }
3422
3423         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3424                 rxp = &tp->rx_jumbo_buffers[i];
3425
3426                 if (rxp->skb == NULL)
3427                         continue;
3428                 pci_unmap_single(tp->pdev,
3429                                  pci_unmap_addr(rxp, mapping),
3430                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3431                                  PCI_DMA_FROMDEVICE);
3432                 dev_kfree_skb_any(rxp->skb);
3433                 rxp->skb = NULL;
3434         }
3435
3436         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3437                 struct tx_ring_info *txp;
3438                 struct sk_buff *skb;
3439                 int j;
3440
3441                 txp = &tp->tx_buffers[i];
3442                 skb = txp->skb;
3443
3444                 if (skb == NULL) {
3445                         i++;
3446                         continue;
3447                 }
3448
3449                 pci_unmap_single(tp->pdev,
3450                                  pci_unmap_addr(txp, mapping),
3451                                  skb_headlen(skb),
3452                                  PCI_DMA_TODEVICE);
3453                 txp->skb = NULL;
3454
3455                 i++;
3456
3457                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3458                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3459                         pci_unmap_page(tp->pdev,
3460                                        pci_unmap_addr(txp, mapping),
3461                                        skb_shinfo(skb)->frags[j].size,
3462                                        PCI_DMA_TODEVICE);
3463                         i++;
3464                 }
3465
3466                 dev_kfree_skb_any(skb);
3467         }
3468 }
3469
3470 /* Initialize tx/rx rings for packet processing.
3471  *
3472  * The chip has been shut down and the driver detached from
3473  * the networking core, so no interrupts or new tx packets will
3474  * end up in the driver.  tp->{tx,}lock are held and thus
3475  * we may not sleep.
3476  */
3477 static void tg3_init_rings(struct tg3 *tp)
3478 {
3479         u32 i;
3480
3481         /* Free up all the SKBs. */
3482         tg3_free_rings(tp);
3483
3484         /* Zero out all descriptors. */
3485         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3486         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3487         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3488         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3489
3490         /* Initialize invariants of the rings; we only set this
3491          * stuff once.  This works because the card does not
3492          * write into the rx buffer posting rings.
3493          */
3494         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3495                 struct tg3_rx_buffer_desc *rxd;
3496
3497                 rxd = &tp->rx_std[i];
3498                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3499                         << RXD_LEN_SHIFT;
3500                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3501                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3502                                (i << RXD_OPAQUE_INDEX_SHIFT));
3503         }
3504
3505         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3506                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3507                         struct tg3_rx_buffer_desc *rxd;
3508
3509                         rxd = &tp->rx_jumbo[i];
3510                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3511                                 << RXD_LEN_SHIFT;
3512                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3513                                 RXD_FLAG_JUMBO;
3514                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3515                                (i << RXD_OPAQUE_INDEX_SHIFT));
3516                 }
3517         }
3518
3519         /* Now allocate fresh SKBs for each rx ring. */
3520         for (i = 0; i < tp->rx_pending; i++) {
3521                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3522                                      -1, i) < 0)
3523                         break;
3524         }
3525
3526         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3527                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3528                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3529                                              -1, i) < 0)
3530                                 break;
3531                 }
3532         }
3533 }
3534
3535 /*
3536  * Must not be invoked with interrupt sources disabled and
3537  * the hardware shut down.
3538  */
3539 static void tg3_free_consistent(struct tg3 *tp)
3540 {
3541         if (tp->rx_std_buffers) {
3542                 kfree(tp->rx_std_buffers);
3543                 tp->rx_std_buffers = NULL;
3544         }
3545         if (tp->rx_std) {
3546                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3547                                     tp->rx_std, tp->rx_std_mapping);
3548                 tp->rx_std = NULL;
3549         }
3550         if (tp->rx_jumbo) {
3551                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3552                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3553                 tp->rx_jumbo = NULL;
3554         }
3555         if (tp->rx_rcb) {
3556                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3557                                     tp->rx_rcb, tp->rx_rcb_mapping);
3558                 tp->rx_rcb = NULL;
3559         }
3560         if (tp->tx_ring) {
3561                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3562                         tp->tx_ring, tp->tx_desc_mapping);
3563                 tp->tx_ring = NULL;
3564         }
3565         if (tp->hw_status) {
3566                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3567                                     tp->hw_status, tp->status_mapping);
3568                 tp->hw_status = NULL;
3569         }
3570         if (tp->hw_stats) {
3571                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3572                                     tp->hw_stats, tp->stats_mapping);
3573                 tp->hw_stats = NULL;
3574         }
3575 }
3576
3577 /*
3578  * Must not be invoked with interrupt sources disabled and
3579  * the hardware shut down.  Can sleep.
3580  */
3581 static int tg3_alloc_consistent(struct tg3 *tp)
3582 {
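        /* A single allocation covers the std RX, jumbo RX and TX ring_info
         * arrays; the jumbo and TX pointers are carved out of it below.
         */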
3583         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3584                                       (TG3_RX_RING_SIZE +
3585                                        TG3_RX_JUMBO_RING_SIZE)) +
3586                                      (sizeof(struct tx_ring_info) *
3587                                       TG3_TX_RING_SIZE),
3588                                      GFP_KERNEL);
3589         if (!tp->rx_std_buffers)
3590                 return -ENOMEM;
3591
3592         memset(tp->rx_std_buffers, 0,
3593                (sizeof(struct ring_info) *
3594                 (TG3_RX_RING_SIZE +
3595                  TG3_RX_JUMBO_RING_SIZE)) +
3596                (sizeof(struct tx_ring_info) *
3597                 TG3_TX_RING_SIZE));
3598
3599         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3600         tp->tx_buffers = (struct tx_ring_info *)
3601                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3602
3603         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3604                                           &tp->rx_std_mapping);
3605         if (!tp->rx_std)
3606                 goto err_out;
3607
3608         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3609                                             &tp->rx_jumbo_mapping);
3610
3611         if (!tp->rx_jumbo)
3612                 goto err_out;
3613
3614         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3615                                           &tp->rx_rcb_mapping);
3616         if (!tp->rx_rcb)
3617                 goto err_out;
3618
3619         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3620                                            &tp->tx_desc_mapping);
3621         if (!tp->tx_ring)
3622                 goto err_out;
3623
3624         tp->hw_status = pci_alloc_consistent(tp->pdev,
3625                                              TG3_HW_STATUS_SIZE,
3626                                              &tp->status_mapping);
3627         if (!tp->hw_status)
3628                 goto err_out;
3629
3630         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3631                                             sizeof(struct tg3_hw_stats),
3632                                             &tp->stats_mapping);
3633         if (!tp->hw_stats)
3634                 goto err_out;
3635
3636         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3637         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3638
3639         return 0;
3640
3641 err_out:
3642         tg3_free_consistent(tp);
3643         return -ENOMEM;
3644 }
3645
3646 #define MAX_WAIT_CNT 1000
3647
3648 /* To stop a block, clear the enable bit and poll till it
3649  * clears.  tp->lock is held.
3650  */
3651 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3652 {
3653         unsigned int i;
3654         u32 val;
3655
3656         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3657                 switch (ofs) {
3658                 case RCVLSC_MODE:
3659                 case DMAC_MODE:
3660                 case MBFREE_MODE:
3661                 case BUFMGR_MODE:
3662                 case MEMARB_MODE:
3663                         /* We can't enable/disable these bits of the
3664                          * 5705/5750, just say success.
3665                          */
3666                         return 0;
3667
3668                 default:
3669                         break;
3670                 };
3671         }
3672
3673         val = tr32(ofs);
3674         val &= ~enable_bit;
3675         tw32_f(ofs, val);
3676
3677         for (i = 0; i < MAX_WAIT_CNT; i++) {
3678                 udelay(100);
3679                 val = tr32(ofs);
3680                 if ((val & enable_bit) == 0)
3681                         break;
3682         }
3683
3684         if (i == MAX_WAIT_CNT) {
3685                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3686                        "ofs=%lx enable_bit=%x\n",
3687                        ofs, enable_bit);
3688                 return -ENODEV;
3689         }
3690
3691         return 0;
3692 }
3693
3694 /* tp->lock is held. */
3695 static int tg3_abort_hw(struct tg3 *tp)
3696 {
3697         int i, err;
3698
3699         tg3_disable_ints(tp);
3700
3701         tp->rx_mode &= ~RX_MODE_ENABLE;
3702         tw32_f(MAC_RX_MODE, tp->rx_mode);
3703         udelay(10);
3704
3705         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3706         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3707         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3708         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3709         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3710         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3711
3712         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3713         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3714         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3715         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3716         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3717         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3718         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3719         if (err)
3720                 goto out;
3721
3722         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3723         tw32_f(MAC_MODE, tp->mac_mode);
3724         udelay(40);
3725
3726         tp->tx_mode &= ~TX_MODE_ENABLE;
3727         tw32_f(MAC_TX_MODE, tp->tx_mode);
3728
3729         for (i = 0; i < MAX_WAIT_CNT; i++) {
3730                 udelay(100);
3731                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3732                         break;
3733         }
3734         if (i >= MAX_WAIT_CNT) {
3735                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3736                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3737                        tp->dev->name, tr32(MAC_TX_MODE));
3738                 return -ENODEV;
3739         }
3740
3741         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3742         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3743         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3744
3745         tw32(FTQ_RESET, 0xffffffff);
3746         tw32(FTQ_RESET, 0x00000000);
3747
3748         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3749         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3750         if (err)
3751                 goto out;
3752
3753         if (tp->hw_status)
3754                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3755         if (tp->hw_stats)
3756                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3757
3758 out:
3759         return err;
3760 }
3761
3762 /* tp->lock is held. */
3763 static int tg3_nvram_lock(struct tg3 *tp)
3764 {
3765         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3766                 int i;
3767
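                /* Request software arbitration slot 1 and poll for the grant
                 * bit; the worst-case wait here is 8000 * 20us = 160ms.
                 */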
3768                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3769                 for (i = 0; i < 8000; i++) {
3770                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3771                                 break;
3772                         udelay(20);
3773                 }
3774                 if (i == 8000)
3775                         return -ENODEV;
3776         }
3777         return 0;
3778 }
3779
3780 /* tp->lock is held. */
3781 static void tg3_nvram_unlock(struct tg3 *tp)
3782 {
3783         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3784                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3785 }
3786
3787 /* tp->lock is held. */
3788 static void tg3_enable_nvram_access(struct tg3 *tp)
3789 {
3790         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3791             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3792                 u32 nvaccess = tr32(NVRAM_ACCESS);
3793
3794                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3795         }
3796 }
3797
3798 /* tp->lock is held. */
3799 static void tg3_disable_nvram_access(struct tg3 *tp)
3800 {
3801         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3802             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3803                 u32 nvaccess = tr32(NVRAM_ACCESS);
3804
3805                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3806         }
3807 }
3808
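/* The tg3_write_sig_*() helpers below tell the ASF management firmware
 * what the driver is about to do -- start, unload or suspend -- by
 * writing well-known state values into NIC SRAM mailboxes, presumably so
 * the firmware can coordinate with the driver across resets.
 */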
3809 /* tp->lock is held. */
3810 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3811 {
3812         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3813                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3814                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3815
3816         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3817                 switch (kind) {
3818                 case RESET_KIND_INIT:
3819                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3820                                       DRV_STATE_START);
3821                         break;
3822
3823                 case RESET_KIND_SHUTDOWN:
3824                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3825                                       DRV_STATE_UNLOAD);
3826                         break;
3827
3828                 case RESET_KIND_SUSPEND:
3829                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3830                                       DRV_STATE_SUSPEND);
3831                         break;
3832
3833                 default:
3834                         break;
3835                 };
3836         }
3837 }
3838
3839 /* tp->lock is held. */
3840 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3841 {
3842         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3843                 switch (kind) {
3844                 case RESET_KIND_INIT:
3845                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3846                                       DRV_STATE_START_DONE);
3847                         break;
3848
3849                 case RESET_KIND_SHUTDOWN:
3850                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3851                                       DRV_STATE_UNLOAD_DONE);
3852                         break;
3853
3854                 default:
3855                         break;
3856                 };
3857         }
3858 }
3859
3860 /* tp->lock is held. */
3861 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3862 {
3863         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3864                 switch (kind) {
3865                 case RESET_KIND_INIT:
3866                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3867                                       DRV_STATE_START);
3868                         break;
3869
3870                 case RESET_KIND_SHUTDOWN:
3871                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3872                                       DRV_STATE_UNLOAD);
3873                         break;
3874
3875                 case RESET_KIND_SUSPEND:
3876                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3877                                       DRV_STATE_SUSPEND);
3878                         break;
3879
3880                 default:
3881                         break;
3882                 };
3883         }
3884 }
3885
3886 static void tg3_stop_fw(struct tg3 *);
3887
3888 /* tp->lock is held. */
3889 static int tg3_chip_reset(struct tg3 *tp)
3890 {
3891         u32 val;
3892         u32 flags_save;
3893         int i;
3894
3895         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3896                 tg3_nvram_lock(tp);
3897
3898         /*
3899          * We must avoid the readl() that normally takes place.
3900          * It can lock up machines, cause machine checks, and do other
3901          * fun things.  So, temporarily disable the 5701
3902          * hardware workaround while we do the reset.
3903          */
3904         flags_save = tp->tg3_flags;
3905         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3906
3907         /* do the reset */
3908         val = GRC_MISC_CFG_CORECLK_RESET;
3909
3910         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3911                 if (tr32(0x7e2c) == 0x60) {
3912                         tw32(0x7e2c, 0x20);
3913                 }
3914                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3915                         tw32(GRC_MISC_CFG, (1 << 29));
3916                         val |= (1 << 29);
3917                 }
3918         }
3919
3920         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3921                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3922         tw32(GRC_MISC_CFG, val);
3923
3924         /* restore 5701 hardware bug workaround flag */
3925         tp->tg3_flags = flags_save;
3926
3927         /* Unfortunately, we have to delay before the PCI read back.
3928          * Some 575X chips will not even respond to a PCI cfg access
3929          * when the reset command is given to the chip.
3930          *
3931          * How do these hardware designers expect things to work
3932          * properly if the PCI write is posted for a long period
3933          * of time?  It is always necessary to have some method by
3934          * which a register read back can occur to push out the write
3935          * that performs the reset.
3936          *
3937          * For most tg3 variants the trick below has worked.
3938          * Ho hum...
3939          */
3940         udelay(120);
3941
3942         /* Flush PCI posted writes.  The normal MMIO registers
3943          * are inaccessible at this time so this is the only
3944          * way to do this reliably (actually, this is no longer
3945          * the case, see above).  I tried to use indirect
3946          * register read/write but this upset some 5701 variants.
3947          */
3948         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3949
3950         udelay(120);
3951
3952         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3953                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3954                         int i;
3955                         u32 cfg_val;
3956
3957                         /* Wait for link training to complete.  */
3958                         for (i = 0; i < 5000; i++)
3959                                 udelay(100);
3960
3961                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3962                         pci_write_config_dword(tp->pdev, 0xc4,
3963                                                cfg_val | (1 << 15));
3964                 }
3965                 /* Set PCIE max payload size and clear error status.  */
3966                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3967         }
3968
3969         /* Re-enable indirect register accesses. */
3970         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3971                                tp->misc_host_ctrl);
3972
3973         /* Set MAX PCI retry to zero. */
3974         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3975         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3976             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3977                 val |= PCISTATE_RETRY_SAME_DMA;
3978         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3979
3980         pci_restore_state(tp->pdev);
3981
3982         /* Make sure PCI-X relaxed ordering bit is clear. */
3983         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3984         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3985         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3986
3987         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3988
3989         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3990                 tg3_stop_fw(tp);
3991                 tw32(0x5000, 0x400);
3992         }
3993
3994         tw32(GRC_MODE, tp->grc_mode);
3995
3996         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3997                 u32 val = tr32(0xc4);
3998
3999                 tw32(0xc4, val | (1 << 15));
4000         }
4001
4002         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4003             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4004                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4005                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4006                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4007                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4008         }
4009
4010         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4011                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4012                 tw32_f(MAC_MODE, tp->mac_mode);
4013         } else
4014                 tw32_f(MAC_MODE, 0);
4015         udelay(40);
4016
4017         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4018                 /* Wait for firmware initialization to complete. */
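                /* (The firmware writes back the ones-complement of the magic
                 * value when it is done; the worst-case wait here is
                 * 100000 * 10us = 1 second.)
                 */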
4019                 for (i = 0; i < 100000; i++) {
4020                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4021                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4022                                 break;
4023                         udelay(10);
4024                 }
4025                 if (i >= 100000) {
4026                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4027                                "firmware will not restart magic=%08x\n",
4028                                tp->dev->name, val);
4029                         return -ENODEV;
4030                 }
4031         }
4032
4033         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4034             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4035                 u32 val = tr32(0x7c00);
4036
4037                 tw32(0x7c00, val | (1 << 25));
4038         }
4039
4040         /* Reprobe ASF enable state.  */
4041         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4042         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4043         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4044         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4045                 u32 nic_cfg;
4046
4047                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4048                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4049                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4050                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4051                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4052                 }
4053         }
4054
4055         return 0;
4056 }
4057
4058 /* tp->lock is held. */
4059 static void tg3_stop_fw(struct tg3 *tp)
4060 {
4061         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4062                 u32 val;
4063                 int i;
4064
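                /* Post the "pause firmware" command in the SRAM mailbox,
                 * then raise the RX CPU event bit (bit 14) and wait for the
                 * firmware to acknowledge by clearing it.
                 */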
4065                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4066                 val = tr32(GRC_RX_CPU_EVENT);
4067                 val |= (1 << 14);
4068                 tw32(GRC_RX_CPU_EVENT, val);
4069
4070                 /* Wait for RX cpu to ACK the event.  */
4071                 for (i = 0; i < 100; i++) {
4072                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4073                                 break;
4074                         udelay(1);
4075                 }
4076         }
4077 }
4078
4079 /* tp->lock is held. */
4080 static int tg3_halt(struct tg3 *tp)
4081 {
4082         int err;
4083
4084         tg3_stop_fw(tp);
4085
4086         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4087
4088         tg3_abort_hw(tp);
4089         err = tg3_chip_reset(tp);
4090
4091         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4092         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4093
4094         if (err)
4095                 return err;
4096
4097         return 0;
4098 }
4099
4100 #define TG3_FW_RELEASE_MAJOR    0x0
4101 #define TG3_FW_RELASE_MINOR     0x0
4102 #define TG3_FW_RELEASE_FIX      0x0
4103 #define TG3_FW_START_ADDR       0x08000000
4104 #define TG3_FW_TEXT_ADDR        0x08000000
4105 #define TG3_FW_TEXT_LEN         0x9c0
4106 #define TG3_FW_RODATA_ADDR      0x080009c0
4107 #define TG3_FW_RODATA_LEN       0x60
4108 #define TG3_FW_DATA_ADDR        0x08000a40
4109 #define TG3_FW_DATA_LEN         0x20
4110 #define TG3_FW_SBSS_ADDR        0x08000a60
4111 #define TG3_FW_SBSS_LEN         0xc
4112 #define TG3_FW_BSS_ADDR         0x08000a70
4113 #define TG3_FW_BSS_LEN          0x10
4114
4115 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4116         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4117         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4118         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4119         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4120         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4121         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4122         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4123         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4124         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4125         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4126         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4127         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4128         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4129         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4130         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4131         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4132         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4133         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4134         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4135         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4136         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4137         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4138         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4139         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4140         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4141         0, 0, 0, 0, 0, 0,
4142         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4143         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4144         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4145         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4146         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4147         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4148         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4149         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4150         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4151         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4152         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4153         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4154         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4155         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4156         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4157         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4158         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4159         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4160         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4161         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4162         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4163         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4164         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4165         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4166         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4167         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4168         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4169         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4170         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4171         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4172         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4173         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4174         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4175         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4176         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4177         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4178         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4179         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4180         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4181         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4182         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4183         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4184         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4185         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4186         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4187         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4188         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4189         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4190         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4191         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4192         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4193         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4194         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4195         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4196         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4197         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4198         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4199         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4200         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4201         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4202         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4203         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4204         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4205         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4206         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4207 };
4208
4209 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4210         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4211         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4212         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4213         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4214         0x00000000
4215 };
4216
4217 #if 0 /* All zeros, don't eat up space with it. */
4218 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4219         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4220         0x00000000, 0x00000000, 0x00000000, 0x00000000
4221 };
4222 #endif
4223
4224 #define RX_CPU_SCRATCH_BASE     0x30000
4225 #define RX_CPU_SCRATCH_SIZE     0x04000
4226 #define TX_CPU_SCRATCH_BASE     0x34000
4227 #define TX_CPU_SCRATCH_SIZE     0x04000
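/* Scratch memory map implied by the constants above (orientation only):
 * each on-chip CPU gets a 16 kB scratch window, RX at 0x30000-0x33fff
 * and TX at 0x34000-0x37fff.
 */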
4228
4229 /* tp->lock is held. */
4230 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4231 {
4232         int i;
4233
4234         if (offset == TX_CPU_BASE &&
4235             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4236                 BUG();
4237
4238         if (offset == RX_CPU_BASE) {
4239                 for (i = 0; i < 10000; i++) {
4240                         tw32(offset + CPU_STATE, 0xffffffff);
4241                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4242                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4243                                 break;
4244                 }
4245
4246                 tw32(offset + CPU_STATE, 0xffffffff);
4247                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4248                 udelay(10);
4249         } else {
4250                 for (i = 0; i < 10000; i++) {
4251                         tw32(offset + CPU_STATE, 0xffffffff);
4252                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4253                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4254                                 break;
4255                 }
4256         }
4257
4258         if (i >= 10000) {
4259                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4260                        "%s CPU\n",
4261                        tp->dev->name,
4262                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4263                 return -ENODEV;
4264         }
4265         return 0;
4266 }
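/* The halt handshake above, restated as a sketch (illustrative only):
 *
 *      tw32(cpu + CPU_STATE, 0xffffffff);      clear pending state bits
 *      tw32(cpu + CPU_MODE,  CPU_MODE_HALT);   request the halt
 *      poll tr32(cpu + CPU_MODE) until CPU_MODE_HALT reads back,
 *      for up to 10000 tries.
 *
 * The RX CPU additionally gets one final forced halt write plus a 10 us
 * settle delay.  The BUG() guards against halting a TX CPU on
 * 5705-class chips, which do not use one here.
 */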
4267
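/* Describes one firmware image: .text, .rodata and .data sections, each
 * with its link-time base address, byte length and payload.  The low 16
 * bits of each base give the offset into the CPU scratch area, and a
 * NULL payload pointer makes the loader below zero-fill that section
 * instead of copying it.
 */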
4268 struct fw_info {
4269         unsigned int text_base;
4270         unsigned int text_len;
4271         u32 *text_data;
4272         unsigned int rodata_base;
4273         unsigned int rodata_len;
4274         u32 *rodata_data;
4275         unsigned int data_base;
4276         unsigned int data_len;
4277         u32 *data_data;
4278 };
4279
4280 /* tp->lock is held. */
4281 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4282                                  int cpu_scratch_size, struct fw_info *info)
4283 {
4284         int err, i;
4285         u32 orig_tg3_flags = tp->tg3_flags;
4286         void (*write_op)(struct tg3 *, u32, u32);
4287
4288         if (cpu_base == TX_CPU_BASE &&
4289             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4290                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4291                        "TX cpu firmware on %s which is 5705.\n",
4292                        tp->dev->name);
4293                 return -EINVAL;
4294         }
4295
4296         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4297                 write_op = tg3_write_mem;
4298         else
4299                 write_op = tg3_write_indirect_reg32;
4300
4301         /* Force use of PCI config space for indirect register
4302          * write calls.
4303          */
4304         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4305
4306         err = tg3_halt_cpu(tp, cpu_base);
4307         if (err)
4308                 goto out;
4309
4310         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4311                 write_op(tp, cpu_scratch_base + i, 0);
4312         tw32(cpu_base + CPU_STATE, 0xffffffff);
4313         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
4314         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4315                 write_op(tp, (cpu_scratch_base +
4316                               (info->text_base & 0xffff) +
4317                               (i * sizeof(u32))),
4318                          (info->text_data ?
4319                           info->text_data[i] : 0));
4320         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4321                 write_op(tp, (cpu_scratch_base +
4322                               (info->rodata_base & 0xffff) +
4323                               (i * sizeof(u32))),
4324                          (info->rodata_data ?
4325                           info->rodata_data[i] : 0));
4326         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4327                 write_op(tp, (cpu_scratch_base +
4328                               (info->data_base & 0xffff) +
4329                               (i * sizeof(u32))),
4330                          (info->data_data ?
4331                           info->data_data[i] : 0));
4332
4333         err = 0;
4334
4335 out:
4336         tp->tg3_flags = orig_tg3_flags;
4337         return err;
4338 }
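/* Typical call sequence (a sketch; the real callers are
 * tg3_load_5701_a0_firmware_fix() and, when TSO firmware is needed,
 * tg3_load_tso_firmware() below):
 *
 *      1. fill a struct fw_info from the TG3_*_FW_* constants,
 *      2. tg3_load_firmware_cpu(tp, cpu_base, scratch_base,
 *         scratch_size, &info) to halt the CPU, clear its scratch
 *         area and copy the sections in,
 *      3. write info.text_base to CPU_PC and clear CPU_MODE to let
 *         the CPU run.
 */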
4339
4340 /* tp->lock is held. */
4341 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4342 {
4343         struct fw_info info;
4344         int err, i;
4345
4346         info.text_base = TG3_FW_TEXT_ADDR;
4347         info.text_len = TG3_FW_TEXT_LEN;
4348         info.text_data = &tg3FwText[0];
4349         info.rodata_base = TG3_FW_RODATA_ADDR;
4350         info.rodata_len = TG3_FW_RODATA_LEN;
4351         info.rodata_data = &tg3FwRodata[0];
4352         info.data_base = TG3_FW_DATA_ADDR;
4353         info.data_len = TG3_FW_DATA_LEN;
4354         info.data_data = NULL;
4355
4356         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4357                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4358                                     &info);
4359         if (err)
4360                 return err;
4361
4362         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4363                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4364                                     &info);
4365         if (err)
4366                 return err;
4367
4368         /* Now start up only the RX CPU. */
4369         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4370         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4371
4372         for (i = 0; i < 5; i++) {
4373                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4374                         break;
4375                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4376                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4377                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4378                 udelay(1000);
4379         }
4380         if (i >= 5) {
4381                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s: "
4382                        "RX CPU PC is %08x, should be %08x\n",
4383                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4384                        TG3_FW_TEXT_ADDR);
4385                 return -ENODEV;
4386         }
4387         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4388         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4389
4390         return 0;
4391 }
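/* Start-up handshake used above (and again by the TSO loader below):
 * write the entry point into CPU_PC, then poll it back for up to five
 * 1 ms intervals; if the PC never sticks, the CPU is assumed dead and
 * -ENODEV is returned.
 */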
4392
4393 #if TG3_TSO_SUPPORT != 0
4394
4395 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4396 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4397 #define TG3_TSO_FW_RELEASE_FIX          0x0
4398 #define TG3_TSO_FW_START_ADDR           0x08000000
4399 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4400 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4401 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4402 #define TG3_TSO_FW_RODATA_LEN           0x60
4403 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4404 #define TG3_TSO_FW_DATA_LEN             0x30
4405 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4406 #define TG3_TSO_FW_SBSS_LEN             0x2c
4407 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4408 #define TG3_TSO_FW_BSS_LEN              0x894
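/* Image layout implied by the constants above (offsets are link-time
 * addresses, shown for orientation only):
 *
 *      .text    0x08000000 - 0x08001a9f  (0x1aa0 bytes)
 *      .rodata  0x08001aa0 - 0x08001aff  (0x60 bytes)
 *      .data    0x08001b20 - 0x08001b4f  (0x30 bytes)
 *      .sbss    0x08001b50 - 0x08001b7b  (0x2c bytes)
 *      .bss     0x08001b80 - 0x08002413  (0x894 bytes)
 */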
4409
4410 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4411         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4412         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4413         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4414         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4415         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4416         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4417         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4418         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4419         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4420         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4421         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4422         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4423         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4424         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4425         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4426         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4427         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4428         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4429         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4430         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4431         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4432         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4433         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4434         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4435         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4436         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4437         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4438         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4439         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4440         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4441         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4442         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4443         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4444         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4445         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4446         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4447         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4448         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4449         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4450         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4451         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4452         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4453         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4454         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4455         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4456         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4457         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4458         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4459         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4460         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4461         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4462         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4463         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4464         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4465         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4466         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4467         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4468         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4469         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4470         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4471         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4472         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4473         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4474         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4475         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4476         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4477         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4478         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4479         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4480         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4481         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4482         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4483         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4484         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4485         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4486         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4487         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4488         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4489         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4490         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4491         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4492         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4493         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4494         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4495         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4496         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4497         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4498         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4499         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4500         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4501         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4502         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4503         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4504         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4505         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4506         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4507         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4508         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4509         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4510         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4511         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4512         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4513         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4514         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4515         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4516         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4517         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4518         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4519         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4520         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4521         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4522         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4523         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4524         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4525         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4526         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4527         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4528         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4529         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4530         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4531         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4532         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4533         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4534         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4535         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4536         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4537         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4538         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4539         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4540         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4541         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4542         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4543         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4544         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4545         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4546         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4547         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4548         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4549         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4550         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4551         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4552         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4553         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4554         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4555         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4556         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4557         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4558         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4559         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4560         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4561         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4562         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4563         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4564         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4565         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4566         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4567         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4568         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4569         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4570         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4571         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4572         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4573         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4574         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4575         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4576         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4577         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4578         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4579         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4580         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4581         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4582         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4583         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4584         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4585         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4586         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4587         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4588         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4589         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4590         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4591         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4592         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4593         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4594         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4595         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4596         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4597         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4598         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4599         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4600         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4601         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4602         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4603         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4604         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4605         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4606         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4607         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4608         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4609         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4610         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4611         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4612         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4613         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4614         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4615         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4616         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4617         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4618         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4619         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4620         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4621         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4622         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4623         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4624         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4625         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4626         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4627         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4628         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4629         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4630         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4631         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4632         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4633         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4634         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4635         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4636         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4637         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4638         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4639         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4640         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4641         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4642         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4643         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4644         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4645         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4646         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4647         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4648         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4649         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4650         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4651         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4652         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4653         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4654         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4655         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4656         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4657         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4658         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4659         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4660         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4661         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4662         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4663         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4664         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4665         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4666         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4667         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4668         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4669         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4670         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4671         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4672         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4673         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4674         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4675         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4676         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4677         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4678         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4679         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4680         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4681         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4682         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4683         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4684         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4685         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4686         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4687         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4688         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4689         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4690         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4691         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4692         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4693         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4694         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4695 };
4696
4697 static u32 tg3TsoFwRodata[] = {
4698         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4699         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4700         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4701         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4702         0x00000000,
4703 };
4704
4705 static u32 tg3TsoFwData[] = {
4706         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4707         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4708         0x00000000,
4709 };
4710
4711 /* 5705 needs a special version of the TSO firmware.  */
4712 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4713 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4714 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4715 #define TG3_TSO5_FW_START_ADDR          0x00010000
4716 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4717 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4718 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4719 #define TG3_TSO5_FW_RODATA_LEN          0x50
4720 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4721 #define TG3_TSO5_FW_DATA_LEN            0x20
4722 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4723 #define TG3_TSO5_FW_SBSS_LEN            0x28
4724 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4725 #define TG3_TSO5_FW_BSS_LEN             0x88
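/* Image layout implied by the constants above (orientation only):
 *
 *      .text    0x00010000 - 0x00010e8f  (0xe90 bytes)
 *      .rodata  0x00010e90 - 0x00010edf  (0x50 bytes)
 *      .data    0x00010f00 - 0x00010f1f  (0x20 bytes)
 *      .sbss    0x00010f20 - 0x00010f47  (0x28 bytes)
 *      .bss     0x00010f50 - 0x00010fd7  (0x88 bytes)
 *
 * tg3_load_tso_firmware() below sizes the 5705 scratch area as the sum
 * of these section lengths (0xfb0 bytes) and places it in the mbuf
 * pool at NIC_SRAM_MBUF_POOL_BASE5705, because on the 5705 the TSO
 * firmware runs on the RX CPU.
 */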
4726
4727 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4728         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4729         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4730         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4731         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4732         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4733         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4734         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4735         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4736         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4737         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4738         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4739         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4740         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4741         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4742         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4743         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4744         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4745         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4746         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4747         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4748         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4749         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4750         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4751         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4752         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4753         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4754         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4755         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4756         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4757         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4758         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4759         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4760         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4761         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4762         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4763         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4764         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4765         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4766         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4767         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4768         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4769         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4770         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4771         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4772         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4773         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4774         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4775         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4776         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4777         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4778         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4779         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4780         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4781         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4782         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4783         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4784         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4785         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4786         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4787         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4788         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4789         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4790         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4791         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4792         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4793         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4794         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4795         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4796         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4797         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4798         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4799         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4800         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4801         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4802         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4803         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4804         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4805         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4806         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4807         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4808         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4809         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4810         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4811         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4812         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4813         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4814         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4815         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4816         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4817         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4818         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4819         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4820         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4821         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4822         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4823         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4824         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4825         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4826         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4827         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4828         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4829         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4830         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4831         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4832         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4833         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4834         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4835         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4836         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4837         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4838         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4839         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4840         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4841         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4842         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4843         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4844         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4845         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4846         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4847         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4848         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4849         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4850         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4851         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4852         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4853         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4854         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4855         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4856         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4857         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4858         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4859         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4860         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4861         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4862         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4863         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4864         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4865         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4866         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4867         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4868         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4869         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4870         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4871         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4872         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4873         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4874         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4875         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4876         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4877         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4878         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4879         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4880         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4881         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4882         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4883         0x00000000, 0x00000000, 0x00000000,
4884 };
4885
4886 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4887         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4888         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4889         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4890         0x00000000, 0x00000000, 0x00000000,
4891 };
4892
4893 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4894         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4895         0x00000000, 0x00000000, 0x00000000,
4896 };
4897
4898 /* tp->lock is held. */
4899 static int tg3_load_tso_firmware(struct tg3 *tp)
4900 {
4901         struct fw_info info;
4902         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4903         int err, i;
4904
4905         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4906                 return 0;
4907
4908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4909                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4910                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4911                 info.text_data = &tg3Tso5FwText[0];
4912                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4913                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4914                 info.rodata_data = &tg3Tso5FwRodata[0];
4915                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4916                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4917                 info.data_data = &tg3Tso5FwData[0];
4918                 cpu_base = RX_CPU_BASE;
4919                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4920                 cpu_scratch_size = (info.text_len +
4921                                     info.rodata_len +
4922                                     info.data_len +
4923                                     TG3_TSO5_FW_SBSS_LEN +
4924                                     TG3_TSO5_FW_BSS_LEN);
4925         } else {
4926                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4927                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4928                 info.text_data = &tg3TsoFwText[0];
4929                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4930                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4931                 info.rodata_data = &tg3TsoFwRodata[0];
4932                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4933                 info.data_len = TG3_TSO_FW_DATA_LEN;
4934                 info.data_data = &tg3TsoFwData[0];
4935                 cpu_base = TX_CPU_BASE;
4936                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4937                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4938         }
4939
4940         err = tg3_load_firmware_cpu(tp, cpu_base,
4941                                     cpu_scratch_base, cpu_scratch_size,
4942                                     &info);
4943         if (err)
4944                 return err;
4945
4946         /* Now start up the CPU. */
4947         tw32(cpu_base + CPU_STATE, 0xffffffff);
4948         tw32_f(cpu_base + CPU_PC,    info.text_base);
4949
4950         for (i = 0; i < 5; i++) {
4951                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4952                         break;
4953                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4954                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4955                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4956                 udelay(1000);
4957         }
4958         if (i >= 5) {
4959                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s: "
4960                        "CPU PC is %08x, should be %08x\n",
4961                        tp->dev->name, tr32(cpu_base + CPU_PC),
4962                        info.text_base);
4963                 return -ENODEV;
4964         }
4965         tw32(cpu_base + CPU_STATE, 0xffffffff);
4966         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4967         return 0;
4968 }
4969
4970 #endif /* TG3_TSO_SUPPORT != 0 */
4971
4972 /* tp->lock is held. */
4973 static void __tg3_set_mac_addr(struct tg3 *tp)
4974 {
4975         u32 addr_high, addr_low;
4976         int i;
4977
4978         addr_high = ((tp->dev->dev_addr[0] << 8) |
4979                      tp->dev->dev_addr[1]);
4980         addr_low = ((tp->dev->dev_addr[2] << 24) |
4981                     (tp->dev->dev_addr[3] << 16) |
4982                     (tp->dev->dev_addr[4] <<  8) |
4983                     (tp->dev->dev_addr[5] <<  0));
4984         for (i = 0; i < 4; i++) {
4985                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4986                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4987         }
4988
4989         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4990             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4991                 for (i = 0; i < 12; i++) {
4992                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4993                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4994                 }
4995         }
4996
4997         addr_high = (tp->dev->dev_addr[0] +
4998                      tp->dev->dev_addr[1] +
4999                      tp->dev->dev_addr[2] +
5000                      tp->dev->dev_addr[3] +
5001                      tp->dev->dev_addr[4] +
5002                      tp->dev->dev_addr[5]) &
5003                 TX_BACKOFF_SEED_MASK;
5004         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5005 }
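/* Packing example with a hypothetical MAC address 00:10:18:aa:bb:cc
 * (for illustration only):
 *
 *      addr_high = 0x00000010          (bytes 0-1)
 *      addr_low  = 0x18aabbcc          (bytes 2-5)
 *
 * The pair is mirrored into all four MAC_ADDR_* slots (plus the twelve
 * extended slots on 5703/5704), and the byte sum, masked with
 * TX_BACKOFF_SEED_MASK, seeds the transmit backoff generator.
 */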
5006
5007 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5008 {
5009         struct tg3 *tp = netdev_priv(dev);
5010         struct sockaddr *addr = p;
5011
5012         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5013
5014         spin_lock_irq(&tp->lock);
5015         __tg3_set_mac_addr(tp);
5016         spin_unlock_irq(&tp->lock);
5017
5018         return 0;
5019 }
5020
5021 /* tp->lock is held. */
5022 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5023                            dma_addr_t mapping, u32 maxlen_flags,
5024                            u32 nic_addr)
5025 {
5026         tg3_write_mem(tp,
5027                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5028                       ((u64) mapping >> 32));
5029         tg3_write_mem(tp,
5030                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5031                       ((u64) mapping & 0xffffffff));
5032         tg3_write_mem(tp,
5033                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5034                        maxlen_flags);
5035
5036         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5037                 tg3_write_mem(tp,
5038                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5039                               nic_addr);
5040 }
5041
5042 static void __tg3_set_rx_mode(struct net_device *);
5043
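/* Bring the chip through a full reset and reprogram it from scratch:
 * stop the firmware, reset the core, re-initialize the rings and the
 * buffer manager, configure the DMA engines and host coalescing, restore
 * the MAC address, load the 5701_A0 and TSO firmware where needed, set
 * up the receive rules, and finally re-enable interrupts if the device
 * was already up.
 */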
5044 /* tp->lock is held. */
5045 static int tg3_reset_hw(struct tg3 *tp)
5046 {
5047         u32 val, rdmac_mode;
5048         int i, err, limit;
5049
5050         tg3_disable_ints(tp);
5051
5052         tg3_stop_fw(tp);
5053
5054         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5055
5056         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5057                 err = tg3_abort_hw(tp);
5058                 if (err)
5059                         return err;
5060         }
5061
5062         err = tg3_chip_reset(tp);
5063         if (err)
5064                 return err;
5065
5066         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5067
5068         /* This works around an issue with Athlon chipsets on
5069          * B3 tigon3 silicon.  This bit has no effect on any
5070          * other revision.  But do not set this on PCI Express
5071          * chips.
5072          */
5073         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5074                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5075         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5076
5077         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5078             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5079                 val = tr32(TG3PCI_PCISTATE);
5080                 val |= PCISTATE_RETRY_SAME_DMA;
5081                 tw32(TG3PCI_PCISTATE, val);
5082         }
5083
5084         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5085                 /* Enable some hw fixes.  */
5086                 val = tr32(TG3PCI_MSI_DATA);
5087                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5088                 tw32(TG3PCI_MSI_DATA, val);
5089         }
5090
5091         /* Descriptor ring init may make accesses to the
5092          * NIC SRAM area to setup the TX descriptors, so we
5093          * can only do this after the hardware has been
5094          * successfully reset.
5095          */
5096         tg3_init_rings(tp);
5097
5098         /* This value is determined during the probe time DMA
5099          * engine test, tg3_test_dma.
5100          */
5101         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5102
5103         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5104                           GRC_MODE_4X_NIC_SEND_RINGS |
5105                           GRC_MODE_NO_TX_PHDR_CSUM |
5106                           GRC_MODE_NO_RX_PHDR_CSUM);
5107         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5108         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5109                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5110         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5111                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5112
5113         tw32(GRC_MODE,
5114              tp->grc_mode |
5115              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5116
5117         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
5118         val = tr32(GRC_MISC_CFG);
5119         val &= ~0xff;
5120         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5121         tw32(GRC_MISC_CFG, val);
5122
5123         /* Initialize MBUF/DESC pool. */
5124         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5125                 /* Do nothing.  */
5126         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5127                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5128                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5129                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5130                 else
5131                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5132                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5133                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5134         }
5135 #if TG3_TSO_SUPPORT != 0
5136         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5137                 int fw_len;
5138
5139                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5140                           TG3_TSO5_FW_RODATA_LEN +
5141                           TG3_TSO5_FW_DATA_LEN +
5142                           TG3_TSO5_FW_SBSS_LEN +
5143                           TG3_TSO5_FW_BSS_LEN);
5144                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5145                 tw32(BUFMGR_MB_POOL_ADDR,
5146                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5147                 tw32(BUFMGR_MB_POOL_SIZE,
5148                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5149         }
5150 #endif
5151
5152         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5153                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5154                      tp->bufmgr_config.mbuf_read_dma_low_water);
5155                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5156                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5157                 tw32(BUFMGR_MB_HIGH_WATER,
5158                      tp->bufmgr_config.mbuf_high_water);
5159         } else {
5160                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5161                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5162                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5163                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5164                 tw32(BUFMGR_MB_HIGH_WATER,
5165                      tp->bufmgr_config.mbuf_high_water_jumbo);
5166         }
5167         tw32(BUFMGR_DMA_LOW_WATER,
5168              tp->bufmgr_config.dma_low_water);
5169         tw32(BUFMGR_DMA_HIGH_WATER,
5170              tp->bufmgr_config.dma_high_water);
5171
5172         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5173         for (i = 0; i < 2000; i++) {
5174                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5175                         break;
5176                 udelay(10);
5177         }
5178         if (i >= 2000) {
5179                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5180                        tp->dev->name);
5181                 return -ENODEV;
5182         }
5183
5184         /* Setup replenish threshold. */
5185         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5186
5187         /* Initialize TG3_BDINFO's at:
5188          *  RCVDBDI_STD_BD:     standard eth size rx ring
5189          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5190          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5191          *
5192          * like so:
5193          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5194          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5195          *                              ring attribute flags
5196          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5197          *
5198          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5199          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5200          *
5201          * The size of each ring is fixed in the firmware, but the location is
5202          * configurable.
5203          */
5204         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5205              ((u64) tp->rx_std_mapping >> 32));
5206         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5207              ((u64) tp->rx_std_mapping & 0xffffffff));
5208         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5209              NIC_SRAM_RX_BUFFER_DESC);
5210
5211         /* Don't even try to program the JUMBO/MINI buffer descriptor
5212          * configs on 5705.
5213          */
5214         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5215                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5216                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5217         } else {
5218                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5219                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5220
5221                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5222                      BDINFO_FLAGS_DISABLED);
5223
5224                 /* Setup replenish threshold. */
5225                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5226
5227                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5228                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5229                              ((u64) tp->rx_jumbo_mapping >> 32));
5230                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5231                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5232                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5233                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5234                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5235                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5236                 } else {
5237                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5238                              BDINFO_FLAGS_DISABLED);
5239                 }
5240
5241         }
5242
5243         /* There is only one send ring on 5705/5750, no need to explicitly
5244          * disable the others.
5245          */
5246         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5247                 /* Clear out send RCB ring in SRAM. */
5248                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5249                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5250                                       BDINFO_FLAGS_DISABLED);
5251         }
5252
5253         tp->tx_prod = 0;
5254         tp->tx_cons = 0;
5255         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5256         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5257
5258         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5259                        tp->tx_desc_mapping,
5260                        (TG3_TX_RING_SIZE <<
5261                         BDINFO_FLAGS_MAXLEN_SHIFT),
5262                        NIC_SRAM_TX_BUFFER_DESC);
5263
5264         /* There is only one receive return ring on 5705/5750, no need
5265          * to explicitly disable the others.
5266          */
5267         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5268                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5269                      i += TG3_BDINFO_SIZE) {
5270                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5271                                       BDINFO_FLAGS_DISABLED);
5272                 }
5273         }
5274
5275         tp->rx_rcb_ptr = 0;
5276         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5277
5278         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5279                        tp->rx_rcb_mapping,
5280                        (TG3_RX_RCB_RING_SIZE(tp) <<
5281                         BDINFO_FLAGS_MAXLEN_SHIFT),
5282                        0);
5283
5284         tp->rx_std_ptr = tp->rx_pending;
5285         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5286                      tp->rx_std_ptr);
5287
5288         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5289                                                 tp->rx_jumbo_pending : 0;
5290         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5291                      tp->rx_jumbo_ptr);
5292
5293         /* Initialize MAC address and backoff seed. */
5294         __tg3_set_mac_addr(tp);
5295
5296         /* MTU + ethernet header + FCS + optional VLAN tag */
5297         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5298
5299         /* The slot time is changed by tg3_setup_phy if we
5300          * run at gigabit with half duplex.
5301          */
5302         tw32(MAC_TX_LENGTHS,
5303              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5304              (6 << TX_LENGTHS_IPG_SHIFT) |
5305              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5306
5307         /* Receive rules. */
5308         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5309         tw32(RCVLPC_CONFIG, 0x0181);
5310
5311         /* Calculate RDMAC_MODE setting early, we need it to determine
5312          * the RCVLPC_STATE_ENABLE mask.
5313          */
5314         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5315                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5316                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5317                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5318                       RDMAC_MODE_LNGREAD_ENAB);
5319         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5320                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5321
5322         /* If statement applies to 5705 and 5750 PCI devices only */
5323         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5324              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5325             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5326                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5327                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5328                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5329                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5330                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5331                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5332                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5333                 }
5334         }
5335
5336         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5337                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5338
5339 #if TG3_TSO_SUPPORT != 0
5340         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5341                 rdmac_mode |= (1 << 27);
5342 #endif
5343
5344         /* Receive/send statistics. */
5345         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5346             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5347                 val = tr32(RCVLPC_STATS_ENABLE);
5348                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5349                 tw32(RCVLPC_STATS_ENABLE, val);
5350         } else {
5351                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5352         }
5353         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5354         tw32(SNDDATAI_STATSENAB, 0xffffff);
5355         tw32(SNDDATAI_STATSCTRL,
5356              (SNDDATAI_SCTRL_ENABLE |
5357               SNDDATAI_SCTRL_FASTUPD));
5358
5359         /* Setup host coalescing engine. */
5360         tw32(HOSTCC_MODE, 0);
5361         for (i = 0; i < 2000; i++) {
5362                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5363                         break;
5364                 udelay(10);
5365         }
5366
5367         tw32(HOSTCC_RXCOL_TICKS, 0);
5368         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5369         tw32(HOSTCC_RXMAX_FRAMES, 1);
5370         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5371         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5372                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5373                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5374         }
5375         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5376         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5377
5378         /* set status block DMA address */
5379         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5380              ((u64) tp->status_mapping >> 32));
5381         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5382              ((u64) tp->status_mapping & 0xffffffff));
5383
5384         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5385                 /* Status/statistics block address.  See tg3_timer,
5386                  * the tg3_periodic_fetch_stats call there, and
5387                  * tg3_get_stats to see how this works for 5705/5750 chips.
5388                  */
5389                 tw32(HOSTCC_STAT_COAL_TICKS,
5390                      DEFAULT_STAT_COAL_TICKS);
5391                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5392                      ((u64) tp->stats_mapping >> 32));
5393                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5394                      ((u64) tp->stats_mapping & 0xffffffff));
5395                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5396                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5397         }
5398
5399         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5400
5401         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5402         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5403         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5404                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5405
5406         /* Clear statistics/status block in chip, and status block in ram. */
5407         for (i = NIC_SRAM_STATS_BLK;
5408              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5409              i += sizeof(u32)) {
5410                 tg3_write_mem(tp, i, 0);
5411                 udelay(40);
5412         }
5413         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5414
5415         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5416                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5417         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5418         udelay(40);
5419
5420         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5421          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5422          * register to preserve the GPIO settings for LOMs. The GPIOs,
5423          * whether used as inputs or outputs, are set by boot code after
5424          * reset.
5425          */
5426         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5427                 u32 gpio_mask;
5428
5429                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5430                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5431
5432                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5433                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5434                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5435
5436                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5437
5438                 /* GPIO1 must be driven high for eeprom write protect */
5439                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5440                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5441         }
5442         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5443         udelay(100);
5444
5445         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5446         tr32(MAILBOX_INTERRUPT_0);
5447
5448         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5449                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5450                 udelay(40);
5451         }
5452
5453         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5454                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5455                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5456                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5457                WDMAC_MODE_LNGREAD_ENAB);
5458
5459         /* If statement applies to 5705 and 5750 PCI devices only */
5460         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5461              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5462             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5463                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5464                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5465                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5466                         /* nothing */
5467                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5468                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5469                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5470                         val |= WDMAC_MODE_RX_ACCEL;
5471                 }
5472         }
5473
5474         tw32_f(WDMAC_MODE, val);
5475         udelay(40);
5476
5477         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5478                 val = tr32(TG3PCI_X_CAPS);
5479                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5480                         val &= ~PCIX_CAPS_BURST_MASK;
5481                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5482                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5483                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5484                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5485                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5486                                 val |= (tp->split_mode_max_reqs <<
5487                                         PCIX_CAPS_SPLIT_SHIFT);
5488                 }
5489                 tw32(TG3PCI_X_CAPS, val);
5490         }
5491
5492         tw32_f(RDMAC_MODE, rdmac_mode);
5493         udelay(40);
5494
5495         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5496         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5497                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5498         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5499         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5500         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5501         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5502         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5503 #if TG3_TSO_SUPPORT != 0
5504         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5505                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5506 #endif
5507         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5508         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5509
5510         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5511                 err = tg3_load_5701_a0_firmware_fix(tp);
5512                 if (err)
5513                         return err;
5514         }
5515
5516 #if TG3_TSO_SUPPORT != 0
5517         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5518                 err = tg3_load_tso_firmware(tp);
5519                 if (err)
5520                         return err;
5521         }
5522 #endif
5523
5524         tp->tx_mode = TX_MODE_ENABLE;
5525         tw32_f(MAC_TX_MODE, tp->tx_mode);
5526         udelay(100);
5527
5528         tp->rx_mode = RX_MODE_ENABLE;
5529         tw32_f(MAC_RX_MODE, tp->rx_mode);
5530         udelay(10);
5531
5532         if (tp->link_config.phy_is_low_power) {
5533                 tp->link_config.phy_is_low_power = 0;
5534                 tp->link_config.speed = tp->link_config.orig_speed;
5535                 tp->link_config.duplex = tp->link_config.orig_duplex;
5536                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5537         }
5538
5539         tp->mi_mode = MAC_MI_MODE_BASE;
5540         tw32_f(MAC_MI_MODE, tp->mi_mode);
5541         udelay(80);
5542
5543         tw32(MAC_LED_CTRL, tp->led_ctrl);
5544
5545         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5546         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5547                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5548                 udelay(10);
5549         }
5550         tw32_f(MAC_RX_MODE, tp->rx_mode);
5551         udelay(10);
5552
5553         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5554                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5555                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5556                         /* Set drive transmission level to 1.2V  */
5557                         /* only if the signal pre-emphasis bit is not set  */
5558                         val = tr32(MAC_SERDES_CFG);
5559                         val &= 0xfffff000;
5560                         val |= 0x880;
5561                         tw32(MAC_SERDES_CFG, val);
5562                 }
5563                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5564                         tw32(MAC_SERDES_CFG, 0x616000);
5565         }
5566
5567         /* Prevent chip from dropping frames when flow control
5568          * is enabled.
5569          */
5570         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5571
5572         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5573             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5574                 /* Use hardware link auto-negotiation */
5575                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5576         }
5577
5578         err = tg3_setup_phy(tp, 1);
5579         if (err)
5580                 return err;
5581
5582         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5583                 u32 tmp;
5584
5585                 /* Clear CRC stats. */
5586                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5587                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5588                         tg3_readphy(tp, 0x14, &tmp);
5589                 }
5590         }
5591
5592         __tg3_set_rx_mode(tp->dev);
5593
5594         /* Initialize receive rules. */
5595         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5596         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5597         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5598         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5599
5600         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5601                 limit = 8;
5602         else
5603                 limit = 16;
5604         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5605                 limit -= 4;
5606         switch (limit) {
5607         case 16:
5608                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5609         case 15:
5610                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5611         case 14:
5612                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5613         case 13:
5614                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5615         case 12:
5616                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5617         case 11:
5618                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5619         case 10:
5620                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5621         case 9:
5622                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5623         case 8:
5624                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5625         case 7:
5626                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5627         case 6:
5628                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5629         case 5:
5630                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5631         case 4:
5632                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5633         case 3:
5634                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5635         case 2:
5636         case 1:
5637
5638         default:
5639                 break;
5640         }
5641
5642         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5643
5644         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5645                 tg3_enable_ints(tp);
5646
5647         return 0;
5648 }
5649
5650 /* Called at device open time to get the chip ready for
5651  * packet processing.  Invoked with tp->lock held.
5652  */
5653 static int tg3_init_hw(struct tg3 *tp)
5654 {
5655         int err;
5656
5657         /* Force the chip into D0. */
5658         err = tg3_set_power_state(tp, 0);
5659         if (err)
5660                 goto out;
5661
5662         tg3_switch_clocks(tp);
5663
5664         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5665
5666         err = tg3_reset_hw(tp);
5667
5668 out:
5669         return err;
5670 }
5671
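/* Accumulate a 32-bit hardware counter into a 64-bit software counter.
 * The low word is added directly; a wrap is detected when the new low
 * word is smaller than the value just added, in which case the high
 * word is carried.
 */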
5672 #define TG3_STAT_ADD32(PSTAT, REG) \
5673 do {    u32 __val = tr32(REG); \
5674         (PSTAT)->low += __val; \
5675         if ((PSTAT)->low < __val) \
5676                 (PSTAT)->high += 1; \
5677 } while (0)
5678
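/* Poll the MAC statistics registers and fold them into the host
 * statistics block.  Used on 5705-class chips, which do not get the
 * periodic statistics block DMA of the older parts (see tg3_timer);
 * skipped while the link is down.
 */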
5679 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5680 {
5681         struct tg3_hw_stats *sp = tp->hw_stats;
5682
5683         if (!netif_carrier_ok(tp->dev))
5684                 return;
5685
5686         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5687         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5688         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5689         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5690         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5691         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5692         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5693         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5694         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5695         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5696         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5697         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5698         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5699
5700         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5701         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5702         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5703         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5704         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5705         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5706         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5707         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5708         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5709         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5710         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5711         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5712         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5713         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5714 }
5715
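/* Periodic (HZ/10) timer.  Forces an interrupt if the status block has
 * been updated (working around races in the non-tagged status protocol),
 * schedules a full reset if the write DMA engine has stopped, checks the
 * link roughly once a second and, with ASF enabled, sends the firmware
 * heartbeat every 120 seconds.
 */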
5716 static void tg3_timer(unsigned long __opaque)
5717 {
5718         struct tg3 *tp = (struct tg3 *) __opaque;
5719         unsigned long flags;
5720
5721         spin_lock_irqsave(&tp->lock, flags);
5722         spin_lock(&tp->tx_lock);
5723
5724         /* All of this garbage is because, when using non-tagged
5725          * IRQ status, the mailbox/status_block protocol the chip
5726          * uses with the CPU is race prone.
5727          */
5728         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5729                 tw32(GRC_LOCAL_CTRL,
5730                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5731         } else {
5732                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5733                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5734         }
5735
5736         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5737                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5738                 spin_unlock(&tp->tx_lock);
5739                 spin_unlock_irqrestore(&tp->lock, flags);
5740                 schedule_work(&tp->reset_task);
5741                 return;
5742         }
5743
5744         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5745                 tg3_periodic_fetch_stats(tp);
5746
5747         /* This part only runs once per second. */
5748         if (!--tp->timer_counter) {
5749                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5750                         u32 mac_stat;
5751                         int phy_event;
5752
5753                         mac_stat = tr32(MAC_STATUS);
5754
5755                         phy_event = 0;
5756                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5757                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5758                                         phy_event = 1;
5759                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5760                                 phy_event = 1;
5761
5762                         if (phy_event)
5763                                 tg3_setup_phy(tp, 0);
5764                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5765                         u32 mac_stat = tr32(MAC_STATUS);
5766                         int need_setup = 0;
5767
5768                         if (netif_carrier_ok(tp->dev) &&
5769                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5770                                 need_setup = 1;
5771                         }
5772                         if (!netif_carrier_ok(tp->dev) &&
5773                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5774                                          MAC_STATUS_SIGNAL_DET))) {
5775                                 need_setup = 1;
5776                         }
5777                         if (need_setup) {
5778                                 tw32_f(MAC_MODE,
5779                                      (tp->mac_mode &
5780                                       ~MAC_MODE_PORT_MODE_MASK));
5781                                 udelay(40);
5782                                 tw32_f(MAC_MODE, tp->mac_mode);
5783                                 udelay(40);
5784                                 tg3_setup_phy(tp, 0);
5785                         }
5786                 }
5787
5788                 tp->timer_counter = tp->timer_multiplier;
5789         }
5790
5791         /* Heartbeat is only sent once every 120 seconds.  */
5792         if (!--tp->asf_counter) {
5793                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5794                         u32 val;
5795
5796                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5797                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5798                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5799                         val = tr32(GRC_RX_CPU_EVENT);
5800                         val |= (1 << 14);
5801                         tw32(GRC_RX_CPU_EVENT, val);
5802                 }
5803                 tp->asf_counter = tp->asf_multiplier;
5804         }
5805
5806         spin_unlock(&tp->tx_lock);
5807         spin_unlock_irqrestore(&tp->lock, flags);
5808
5809         tp->timer.expires = jiffies + tp->timer_offset;
5810         add_timer(&tp->timer);
5811 }
5812
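/* Verify that the chip can actually deliver an interrupt: temporarily
 * install tg3_test_isr, force a coalescing-now event, poll the interrupt
 * mailbox for up to ~50 ms, then restore the normal MSI or INTx handler.
 * Returns 0 on success, -EIO if no interrupt arrived.
 */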
5813 static int tg3_test_interrupt(struct tg3 *tp)
5814 {
5815         struct net_device *dev = tp->dev;
5816         int err, i;
5817         u32 int_mbox = 0;
5818
5819         tg3_disable_ints(tp);
5820
5821         free_irq(tp->pdev->irq, dev);
5822
5823         err = request_irq(tp->pdev->irq, tg3_test_isr,
5824                           SA_SHIRQ, dev->name, dev);
5825         if (err)
5826                 return err;
5827
5828         tg3_enable_ints(tp);
5829
5830         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
5831                HOSTCC_MODE_NOW);
5832
5833         for (i = 0; i < 5; i++) {
5834                 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5835                 if (int_mbox != 0)
5836                         break;
5837                 msleep(10);
5838         }
5839
5840         tg3_disable_ints(tp);
5841
5842         free_irq(tp->pdev->irq, dev);
5843
5844         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5845                 err = request_irq(tp->pdev->irq, tg3_msi,
5846                                   0, dev->name, dev);
5847         else
5848                 err = request_irq(tp->pdev->irq, tg3_interrupt,
5849                                   SA_SHIRQ, dev->name, dev);
5850
5851         if (err)
5852                 return err;
5853
5854         if (int_mbox != 0)
5855                 return 0;
5856
5857         return -EIO;
5858 }
5859
5860 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
5861  * INTx mode is successfully restored.
5862  */
5863 static int tg3_test_msi(struct tg3 *tp)
5864 {
5865         struct net_device *dev = tp->dev;
5866         int err;
5867         u16 pci_cmd;
5868
5869         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
5870                 return 0;
5871
5872         /* Turn off SERR reporting in case MSI terminates with Master
5873          * Abort.
5874          */
5875         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
5876         pci_write_config_word(tp->pdev, PCI_COMMAND,
5877                               pci_cmd & ~PCI_COMMAND_SERR);
5878
5879         err = tg3_test_interrupt(tp);
5880
5881         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
5882
5883         if (!err)
5884                 return 0;
5885
5886         /* other failures */
5887         if (err != -EIO)
5888                 return err;
5889
5890         /* MSI test failed, go back to INTx mode */
5891         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
5892                "switching to INTx mode. Please report this failure to "
5893                "the PCI maintainer and include system chipset information.\n",
5894                        tp->dev->name);
5895
5896         free_irq(tp->pdev->irq, dev);
5897         pci_disable_msi(tp->pdev);
5898
5899         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5900
5901         err = request_irq(tp->pdev->irq, tg3_interrupt,
5902                           SA_SHIRQ, dev->name, dev);
5903
5904         if (err)
5905                 return err;
5906
5907         /* Need to reset the chip because the MSI cycle may have terminated
5908          * with Master Abort.
5909          */
5910         spin_lock_irq(&tp->lock);
5911         spin_lock(&tp->tx_lock);
5912
5913         tg3_halt(tp);
5914         err = tg3_init_hw(tp);
5915
5916         spin_unlock(&tp->tx_lock);
5917         spin_unlock_irq(&tp->lock);
5918
5919         if (err)
5920                 free_irq(tp->pdev->irq, dev);
5921
5922         return err;
5923 }
5924
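/* dev->open: allocate the DMA-consistent rings, request the interrupt
 * (MSI on 5750-class chips that support it, shared INTx otherwise),
 * program the hardware, verify MSI delivery, then arm the periodic
 * timer and start the transmit queue.
 */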
5925 static int tg3_open(struct net_device *dev)
5926 {
5927         struct tg3 *tp = netdev_priv(dev);
5928         int err;
5929
5930         spin_lock_irq(&tp->lock);
5931         spin_lock(&tp->tx_lock);
5932
5933         tg3_disable_ints(tp);
5934         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5935
5936         spin_unlock(&tp->tx_lock);
5937         spin_unlock_irq(&tp->lock);
5938
5939         /* The placement of this call is tied
5940          * to the setup and use of Host TX descriptors.
5941          */
5942         err = tg3_alloc_consistent(tp);
5943         if (err)
5944                 return err;
5945
5946         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5947             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
5948             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
5949                 if (pci_enable_msi(tp->pdev) == 0) {
5950                         u32 msi_mode;
5951
5952                         msi_mode = tr32(MSGINT_MODE);
5953                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
5954                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
5955                 }
5956         }
5957         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5958                 err = request_irq(tp->pdev->irq, tg3_msi,
5959                                   0, dev->name, dev);
5960         else
5961                 err = request_irq(tp->pdev->irq, tg3_interrupt,
5962                                   SA_SHIRQ, dev->name, dev);
5963
5964         if (err) {
5965                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5966                         pci_disable_msi(tp->pdev);
5967                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5968                 }
5969                 tg3_free_consistent(tp);
5970                 return err;
5971         }
5972
5973         spin_lock_irq(&tp->lock);
5974         spin_lock(&tp->tx_lock);
5975
5976         err = tg3_init_hw(tp);
5977         if (err) {
5978                 tg3_halt(tp);
5979                 tg3_free_rings(tp);
5980         } else {
5981                 tp->timer_offset = HZ / 10;
5982                 tp->timer_counter = tp->timer_multiplier = 10;
5983                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5984
5985                 init_timer(&tp->timer);
5986                 tp->timer.expires = jiffies + tp->timer_offset;
5987                 tp->timer.data = (unsigned long) tp;
5988                 tp->timer.function = tg3_timer;
5989         }
5990
5991         spin_unlock(&tp->tx_lock);
5992         spin_unlock_irq(&tp->lock);
5993
5994         if (err) {
5995                 free_irq(tp->pdev->irq, dev);
5996                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5997                         pci_disable_msi(tp->pdev);
5998                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5999                 }
6000                 tg3_free_consistent(tp);
6001                 return err;
6002         }
6003
6004         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6005                 err = tg3_test_msi(tp);
6006                 if (err) {
6007                         spin_lock_irq(&tp->lock);
6008                         spin_lock(&tp->tx_lock);
6009
6010                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6011                                 pci_disable_msi(tp->pdev);
6012                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6013                         }
6014                         tg3_halt(tp);
6015                         tg3_free_rings(tp);
6016                         tg3_free_consistent(tp);
6017
6018                         spin_unlock(&tp->tx_lock);
6019                         spin_unlock_irq(&tp->lock);
6020
6021                         return err;
6022                 }
6023         }
6024
6025         spin_lock_irq(&tp->lock);
6026         spin_lock(&tp->tx_lock);
6027
6028         add_timer(&tp->timer);
6029         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6030         tg3_enable_ints(tp);
6031
6032         spin_unlock(&tp->tx_lock);
6033         spin_unlock_irq(&tp->lock);
6034
6035         netif_start_queue(dev);
6036
6037         return 0;
6038 }
6039
6040 #if 0
6041 /*static*/ void tg3_dump_state(struct tg3 *tp)
6042 {
6043         u32 val32, val32_2, val32_3, val32_4, val32_5;
6044         u16 val16;
6045         int i;
6046
6047         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6048         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6049         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6050                val16, val32);
6051
6052         /* MAC block */
6053         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6054                tr32(MAC_MODE), tr32(MAC_STATUS));
6055         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6056                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6057         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6058                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6059         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6060                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6061
6062         /* Send data initiator control block */
6063         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6064                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6065         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6066                tr32(SNDDATAI_STATSCTRL));
6067
6068         /* Send data completion control block */
6069         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6070
6071         /* Send BD ring selector block */
6072         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6073                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6074
6075         /* Send BD initiator control block */
6076         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6077                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6078
6079         /* Send BD completion control block */
6080         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6081
6082         /* Receive list placement control block */
6083         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6084                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6085         printk("       RCVLPC_STATSCTRL[%08x]\n",
6086                tr32(RCVLPC_STATSCTRL));
6087
6088         /* Receive data and receive BD initiator control block */
6089         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6090                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6091
6092         /* Receive data completion control block */
6093         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6094                tr32(RCVDCC_MODE));
6095
6096         /* Receive BD initiator control block */
6097         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6098                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6099
6100         /* Receive BD completion control block */
6101         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6102                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6103
6104         /* Receive list selector control block */
6105         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6106                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6107
6108         /* Mbuf cluster free block */
6109         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6110                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6111
6112         /* Host coalescing control block */
6113         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6114                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6115         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6116                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6117                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6118         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6119                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6120                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6121         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6122                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6123         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6124                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6125
6126         /* Memory arbiter control block */
6127         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6128                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6129
6130         /* Buffer manager control block */
6131         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6132                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6133         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6134                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6135         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6136                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6137                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6138                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6139
6140         /* Read DMA control block */
6141         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6142                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6143
6144         /* Write DMA control block */
6145         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6146                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6147
6148         /* DMA completion block */
6149         printk("DEBUG: DMAC_MODE[%08x]\n",
6150                tr32(DMAC_MODE));
6151
6152         /* GRC block */
6153         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6154                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6155         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6156                tr32(GRC_LOCAL_CTRL));
6157
6158         /* TG3_BDINFOs */
6159         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6160                tr32(RCVDBDI_JUMBO_BD + 0x0),
6161                tr32(RCVDBDI_JUMBO_BD + 0x4),
6162                tr32(RCVDBDI_JUMBO_BD + 0x8),
6163                tr32(RCVDBDI_JUMBO_BD + 0xc));
6164         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6165                tr32(RCVDBDI_STD_BD + 0x0),
6166                tr32(RCVDBDI_STD_BD + 0x4),
6167                tr32(RCVDBDI_STD_BD + 0x8),
6168                tr32(RCVDBDI_STD_BD + 0xc));
6169         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6170                tr32(RCVDBDI_MINI_BD + 0x0),
6171                tr32(RCVDBDI_MINI_BD + 0x4),
6172                tr32(RCVDBDI_MINI_BD + 0x8),
6173                tr32(RCVDBDI_MINI_BD + 0xc));
6174
6175         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6176         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6177         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6178         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6179         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6180                val32, val32_2, val32_3, val32_4);
6181
6182         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6183         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6184         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6185         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6186         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6187                val32, val32_2, val32_3, val32_4);
6188
6189         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6190         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6191         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6192         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6193         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6194         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6195                val32, val32_2, val32_3, val32_4, val32_5);
6196
6197         /* SW status block */
6198         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6199                tp->hw_status->status,
6200                tp->hw_status->status_tag,
6201                tp->hw_status->rx_jumbo_consumer,
6202                tp->hw_status->rx_consumer,
6203                tp->hw_status->rx_mini_consumer,
6204                tp->hw_status->idx[0].rx_producer,
6205                tp->hw_status->idx[0].tx_consumer);
6206
6207         /* SW statistics block */
6208         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6209                ((u32 *)tp->hw_stats)[0],
6210                ((u32 *)tp->hw_stats)[1],
6211                ((u32 *)tp->hw_stats)[2],
6212                ((u32 *)tp->hw_stats)[3]);
6213
6214         /* Mailboxes */
6215         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6216                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6217                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6218                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6219                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6220
6221         /* NIC side send descriptors. */
6222         for (i = 0; i < 6; i++) {
6223                 unsigned long txd;
6224
6225                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6226                         + (i * sizeof(struct tg3_tx_buffer_desc));
6227                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6228                        i,
6229                        readl(txd + 0x0), readl(txd + 0x4),
6230                        readl(txd + 0x8), readl(txd + 0xc));
6231         }
6232
6233         /* NIC side RX descriptors. */
6234         for (i = 0; i < 6; i++) {
6235                 unsigned long rxd;
6236
6237                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6238                         + (i * sizeof(struct tg3_rx_buffer_desc));
6239                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6240                        i,
6241                        readl(rxd + 0x0), readl(rxd + 0x4),
6242                        readl(rxd + 0x8), readl(rxd + 0xc));
6243                 rxd += (4 * sizeof(u32));
6244                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6245                        i,
6246                        readl(rxd + 0x0), readl(rxd + 0x4),
6247                        readl(rxd + 0x8), readl(rxd + 0xc));
6248         }
6249
6250         for (i = 0; i < 6; i++) {
6251                 unsigned long rxd;
6252
6253                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6254                         + (i * sizeof(struct tg3_rx_buffer_desc));
6255                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6256                        i,
6257                        readl(rxd + 0x0), readl(rxd + 0x4),
6258                        readl(rxd + 0x8), readl(rxd + 0xc));
6259                 rxd += (4 * sizeof(u32));
6260                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6261                        i,
6262                        readl(rxd + 0x0), readl(rxd + 0x4),
6263                        readl(rxd + 0x8), readl(rxd + 0xc));
6264         }
6265 }
6266 #endif
6267
6268 static struct net_device_stats *tg3_get_stats(struct net_device *);
6269 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6270
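/* dev->stop: stop the queue and the periodic timer, halt the chip, free
 * the rings and the interrupt (disabling MSI if it was in use), snapshot
 * the statistics into net_stats_prev/estats_prev, and release the
 * DMA-consistent memory.
 */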
6271 static int tg3_close(struct net_device *dev)
6272 {
6273         struct tg3 *tp = netdev_priv(dev);
6274
6275         netif_stop_queue(dev);
6276
6277         del_timer_sync(&tp->timer);
6278
6279         spin_lock_irq(&tp->lock);
6280         spin_lock(&tp->tx_lock);
6281 #if 0
6282         tg3_dump_state(tp);
6283 #endif
6284
6285         tg3_disable_ints(tp);
6286
6287         tg3_halt(tp);
6288         tg3_free_rings(tp);
6289         tp->tg3_flags &=
6290                 ~(TG3_FLAG_INIT_COMPLETE |
6291                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6292         netif_carrier_off(tp->dev);
6293
6294         spin_unlock(&tp->tx_lock);
6295         spin_unlock_irq(&tp->lock);
6296
6297         free_irq(tp->pdev->irq, dev);
6298         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6299                 pci_disable_msi(tp->pdev);
6300                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6301         }
6302
6303         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6304                sizeof(tp->net_stats_prev));
6305         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6306                sizeof(tp->estats_prev));
6307
6308         tg3_free_consistent(tp);
6309
6310         return 0;
6311 }
6312
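/* Fold a 64-bit hardware counter into an unsigned long.  On 32-bit
 * hosts only the low word is reported; on 64-bit hosts the full value.
 */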
6313 static inline unsigned long get_stat64(tg3_stat64_t *val)
6314 {
6315         unsigned long ret;
6316
6317 #if (BITS_PER_LONG == 32)
6318         ret = val->low;
6319 #else
6320         ret = ((u64)val->high << 32) | ((u64)val->low);
6321 #endif
6322         return ret;
6323 }
6324
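/* On 5700/5701 copper devices the CRC error count is read from the PHY
 * (via registers 0x1e and 0x14) and accumulated in tp->phy_crc_errors;
 * all other devices report it from the hardware statistics block
 * (rx_fcs_errors).
 */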
6325 static unsigned long calc_crc_errors(struct tg3 *tp)
6326 {
6327         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6328
6329         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6330             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6331              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6332                 unsigned long flags;
6333                 u32 val;
6334
6335                 spin_lock_irqsave(&tp->lock, flags);
6336                 if (!tg3_readphy(tp, 0x1e, &val)) {
6337                         tg3_writephy(tp, 0x1e, val | 0x8000);
6338                         tg3_readphy(tp, 0x14, &val);
6339                 } else
6340                         val = 0;
6341                 spin_unlock_irqrestore(&tp->lock, flags);
6342
6343                 tp->phy_crc_errors += val;
6344
6345                 return tp->phy_crc_errors;
6346         }
6347
6348         return get_stat64(&hw_stats->rx_fcs_errors);
6349 }
6350
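     /* Ethtool statistics are accumulated on top of the snapshot saved in
      * estats_prev at the last tg3_close(): each counter is that snapshot
      * plus the live hardware counter, so totals survive a down/up cycle.
      */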
6351 #define ESTAT_ADD(member) \
6352         estats->member =        old_estats->member + \
6353                                 get_stat64(&hw_stats->member)
6354
6355 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6356 {
6357         struct tg3_ethtool_stats *estats = &tp->estats;
6358         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6359         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6360
6361         if (!hw_stats)
6362                 return old_estats;
6363
6364         ESTAT_ADD(rx_octets);
6365         ESTAT_ADD(rx_fragments);
6366         ESTAT_ADD(rx_ucast_packets);
6367         ESTAT_ADD(rx_mcast_packets);
6368         ESTAT_ADD(rx_bcast_packets);
6369         ESTAT_ADD(rx_fcs_errors);
6370         ESTAT_ADD(rx_align_errors);
6371         ESTAT_ADD(rx_xon_pause_rcvd);
6372         ESTAT_ADD(rx_xoff_pause_rcvd);
6373         ESTAT_ADD(rx_mac_ctrl_rcvd);
6374         ESTAT_ADD(rx_xoff_entered);
6375         ESTAT_ADD(rx_frame_too_long_errors);
6376         ESTAT_ADD(rx_jabbers);
6377         ESTAT_ADD(rx_undersize_packets);
6378         ESTAT_ADD(rx_in_length_errors);
6379         ESTAT_ADD(rx_out_length_errors);
6380         ESTAT_ADD(rx_64_or_less_octet_packets);
6381         ESTAT_ADD(rx_65_to_127_octet_packets);
6382         ESTAT_ADD(rx_128_to_255_octet_packets);
6383         ESTAT_ADD(rx_256_to_511_octet_packets);
6384         ESTAT_ADD(rx_512_to_1023_octet_packets);
6385         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6386         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6387         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6388         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6389         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6390
6391         ESTAT_ADD(tx_octets);
6392         ESTAT_ADD(tx_collisions);
6393         ESTAT_ADD(tx_xon_sent);
6394         ESTAT_ADD(tx_xoff_sent);
6395         ESTAT_ADD(tx_flow_control);
6396         ESTAT_ADD(tx_mac_errors);
6397         ESTAT_ADD(tx_single_collisions);
6398         ESTAT_ADD(tx_mult_collisions);
6399         ESTAT_ADD(tx_deferred);
6400         ESTAT_ADD(tx_excessive_collisions);
6401         ESTAT_ADD(tx_late_collisions);
6402         ESTAT_ADD(tx_collide_2times);
6403         ESTAT_ADD(tx_collide_3times);
6404         ESTAT_ADD(tx_collide_4times);
6405         ESTAT_ADD(tx_collide_5times);
6406         ESTAT_ADD(tx_collide_6times);
6407         ESTAT_ADD(tx_collide_7times);
6408         ESTAT_ADD(tx_collide_8times);
6409         ESTAT_ADD(tx_collide_9times);
6410         ESTAT_ADD(tx_collide_10times);
6411         ESTAT_ADD(tx_collide_11times);
6412         ESTAT_ADD(tx_collide_12times);
6413         ESTAT_ADD(tx_collide_13times);
6414         ESTAT_ADD(tx_collide_14times);
6415         ESTAT_ADD(tx_collide_15times);
6416         ESTAT_ADD(tx_ucast_packets);
6417         ESTAT_ADD(tx_mcast_packets);
6418         ESTAT_ADD(tx_bcast_packets);
6419         ESTAT_ADD(tx_carrier_sense_errors);
6420         ESTAT_ADD(tx_discards);
6421         ESTAT_ADD(tx_errors);
6422
6423         ESTAT_ADD(dma_writeq_full);
6424         ESTAT_ADD(dma_write_prioq_full);
6425         ESTAT_ADD(rxbds_empty);
6426         ESTAT_ADD(rx_discards);
6427         ESTAT_ADD(rx_errors);
6428         ESTAT_ADD(rx_threshold_hit);
6429
6430         ESTAT_ADD(dma_readq_full);
6431         ESTAT_ADD(dma_read_prioq_full);
6432         ESTAT_ADD(tx_comp_queue_full);
6433
6434         ESTAT_ADD(ring_set_send_prod_index);
6435         ESTAT_ADD(ring_status_update);
6436         ESTAT_ADD(nic_irqs);
6437         ESTAT_ADD(nic_avoided_irqs);
6438         ESTAT_ADD(nic_tx_threshold_hit);
6439
6440         return estats;
6441 }
6442
6443 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6444 {
6445         struct tg3 *tp = netdev_priv(dev);
6446         struct net_device_stats *stats = &tp->net_stats;
6447         struct net_device_stats *old_stats = &tp->net_stats_prev;
6448         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6449
6450         if (!hw_stats)
6451                 return old_stats;
6452
6453         stats->rx_packets = old_stats->rx_packets +
6454                 get_stat64(&hw_stats->rx_ucast_packets) +
6455                 get_stat64(&hw_stats->rx_mcast_packets) +
6456                 get_stat64(&hw_stats->rx_bcast_packets);
6457                 
6458         stats->tx_packets = old_stats->tx_packets +
6459                 get_stat64(&hw_stats->tx_ucast_packets) +
6460                 get_stat64(&hw_stats->tx_mcast_packets) +
6461                 get_stat64(&hw_stats->tx_bcast_packets);
6462
6463         stats->rx_bytes = old_stats->rx_bytes +
6464                 get_stat64(&hw_stats->rx_octets);
6465         stats->tx_bytes = old_stats->tx_bytes +
6466                 get_stat64(&hw_stats->tx_octets);
6467
6468         stats->rx_errors = old_stats->rx_errors +
6469                 get_stat64(&hw_stats->rx_errors) +
6470                 get_stat64(&hw_stats->rx_discards);
6471         stats->tx_errors = old_stats->tx_errors +
6472                 get_stat64(&hw_stats->tx_errors) +
6473                 get_stat64(&hw_stats->tx_mac_errors) +
6474                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6475                 get_stat64(&hw_stats->tx_discards);
6476
6477         stats->multicast = old_stats->multicast +
6478                 get_stat64(&hw_stats->rx_mcast_packets);
6479         stats->collisions = old_stats->collisions +
6480                 get_stat64(&hw_stats->tx_collisions);
6481
6482         stats->rx_length_errors = old_stats->rx_length_errors +
6483                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6484                 get_stat64(&hw_stats->rx_undersize_packets);
6485
6486         stats->rx_over_errors = old_stats->rx_over_errors +
6487                 get_stat64(&hw_stats->rxbds_empty);
6488         stats->rx_frame_errors = old_stats->rx_frame_errors +
6489                 get_stat64(&hw_stats->rx_align_errors);
6490         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6491                 get_stat64(&hw_stats->tx_discards);
6492         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6493                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6494
6495         stats->rx_crc_errors = old_stats->rx_crc_errors +
6496                 calc_crc_errors(tp);
6497
6498         return stats;
6499 }
6500
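     /* Straightforward bitwise (reflected) CRC-32, polynomial 0xedb88320;
      * used below to hash multicast addresses into the MAC_HASH_REG filter
      * registers.
      */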
6501 static inline u32 calc_crc(unsigned char *buf, int len)
6502 {
6503         u32 reg;
6504         u32 tmp;
6505         int j, k;
6506
6507         reg = 0xffffffff;
6508
6509         for (j = 0; j < len; j++) {
6510                 reg ^= buf[j];
6511
6512                 for (k = 0; k < 8; k++) {
6513                         tmp = reg & 0x01;
6514
6515                         reg >>= 1;
6516
6517                         if (tmp) {
6518                                 reg ^= 0xedb88320;
6519                         }
6520                 }
6521         }
6522
6523         return ~reg;
6524 }
6525
6526 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6527 {
6528         /* accept or reject all multicast frames */
6529         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6530         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6531         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6532         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6533 }
6534
6535 static void __tg3_set_rx_mode(struct net_device *dev)
6536 {
6537         struct tg3 *tp = netdev_priv(dev);
6538         u32 rx_mode;
6539
6540         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6541                                   RX_MODE_KEEP_VLAN_TAG);
6542
6543         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6544          * flag clear.
6545          */
6546 #if TG3_VLAN_TAG_USED
6547         if (!tp->vlgrp &&
6548             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6549                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6550 #else
6551         /* By definition, VLAN is always disabled in
6552          * this case.
6553          */
6554         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6555                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6556 #endif
6557
6558         if (dev->flags & IFF_PROMISC) {
6559                 /* Promiscuous mode. */
6560                 rx_mode |= RX_MODE_PROMISC;
6561         } else if (dev->flags & IFF_ALLMULTI) {
6562                 /* Accept all multicast. */
6563                 tg3_set_multi (tp, 1);
6564         } else if (dev->mc_count < 1) {
6565                 /* Reject all multicast. */
6566                 tg3_set_multi (tp, 0);
6567         } else {
6568                 /* Accept one or more multicast(s). */
6569                 struct dev_mc_list *mclist;
6570                 unsigned int i;
6571                 u32 mc_filter[4] = { 0, };
6572                 u32 regidx;
6573                 u32 bit;
6574                 u32 crc;
6575
6576                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6577                      i++, mclist = mclist->next) {
6578
6579                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
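                             /* The low 7 bits of the inverted CRC pick one of
                              * 128 filter bits: bits 6:5 select one of the four
                              * hash registers, bits 4:0 the bit within it.
                              */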
6580                         bit = ~crc & 0x7f;
6581                         regidx = (bit & 0x60) >> 5;
6582                         bit &= 0x1f;
6583                         mc_filter[regidx] |= (1 << bit);
6584                 }
6585
6586                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6587                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6588                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6589                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6590         }
6591
6592         if (rx_mode != tp->rx_mode) {
6593                 tp->rx_mode = rx_mode;
6594                 tw32_f(MAC_RX_MODE, rx_mode);
6595                 udelay(10);
6596         }
6597 }
6598
6599 static void tg3_set_rx_mode(struct net_device *dev)
6600 {
6601         struct tg3 *tp = netdev_priv(dev);
6602
6603         spin_lock_irq(&tp->lock);
6604         spin_lock(&tp->tx_lock);
6605         __tg3_set_rx_mode(dev);
6606         spin_unlock(&tp->tx_lock);
6607         spin_unlock_irq(&tp->lock);
6608 }
6609
6610 #define TG3_REGDUMP_LEN         (32 * 1024)
6611
6612 static int tg3_get_regs_len(struct net_device *dev)
6613 {
6614         return TG3_REGDUMP_LEN;
6615 }
6616
6617 static void tg3_get_regs(struct net_device *dev,
6618                 struct ethtool_regs *regs, void *_p)
6619 {
6620         u32 *p = _p;
6621         struct tg3 *tp = netdev_priv(dev);
6622         u8 *orig_p = _p;
6623         int i;
6624
6625         regs->version = 0;
6626
6627         memset(p, 0, TG3_REGDUMP_LEN);
6628
6629         spin_lock_irq(&tp->lock);
6630         spin_lock(&tp->tx_lock);
6631
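     /* Each register is copied to its own offset inside the 32K dump buffer,
      * so ranges that are not read stay zero from the memset() above.
      */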
6632 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6633 #define GET_REG32_LOOP(base,len)                \
6634 do {    p = (u32 *)(orig_p + (base));           \
6635         for (i = 0; i < len; i += 4)            \
6636                 __GET_REG32((base) + i);        \
6637 } while (0)
6638 #define GET_REG32_1(reg)                        \
6639 do {    p = (u32 *)(orig_p + (reg));            \
6640         __GET_REG32((reg));                     \
6641 } while (0)
6642
6643         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6644         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6645         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6646         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6647         GET_REG32_1(SNDDATAC_MODE);
6648         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6649         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6650         GET_REG32_1(SNDBDC_MODE);
6651         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6652         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6653         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6654         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6655         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6656         GET_REG32_1(RCVDCC_MODE);
6657         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6658         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6659         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6660         GET_REG32_1(MBFREE_MODE);
6661         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6662         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6663         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6664         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6665         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6666         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6667         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6668         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6669         GET_REG32_LOOP(FTQ_RESET, 0x120);
6670         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6671         GET_REG32_1(DMAC_MODE);
6672         GET_REG32_LOOP(GRC_MODE, 0x4c);
6673         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6674                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6675
6676 #undef __GET_REG32
6677 #undef GET_REG32_LOOP
6678 #undef GET_REG32_1
6679
6680         spin_unlock(&tp->tx_lock);
6681         spin_unlock_irq(&tp->lock);
6682 }
6683
6684 static int tg3_get_eeprom_len(struct net_device *dev)
6685 {
6686         struct tg3 *tp = netdev_priv(dev);
6687
6688         return tp->nvram_size;
6689 }
6690
6691 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6692
6693 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6694 {
6695         struct tg3 *tp = netdev_priv(dev);
6696         int ret;
6697         u8  *pd;
6698         u32 i, offset, len, val, b_offset, b_count;
6699
6700         offset = eeprom->offset;
6701         len = eeprom->len;
6702         eeprom->len = 0;
6703
6704         eeprom->magic = TG3_EEPROM_MAGIC;
6705
6706         if (offset & 3) {
6707                 /* adjustments to start on required 4 byte boundary */
6708                 b_offset = offset & 3;
6709                 b_count = 4 - b_offset;
6710                 if (b_count > len) {
6711                         /* i.e. offset=1 len=2 */
6712                         b_count = len;
6713                 }
6714                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6715                 if (ret)
6716                         return ret;
6717                 val = cpu_to_le32(val);
6718                 memcpy(data, ((char*)&val) + b_offset, b_count);
6719                 len -= b_count;
6720                 offset += b_count;
6721                 eeprom->len += b_count;
6722         }
6723
6724         /* read bytes up to the last 4 byte boundary */
6725         pd = &data[eeprom->len];
6726         for (i = 0; i < (len - (len & 3)); i += 4) {
6727                 ret = tg3_nvram_read(tp, offset + i, &val);
6728                 if (ret) {
6729                         eeprom->len += i;
6730                         return ret;
6731                 }
6732                 val = cpu_to_le32(val);
6733                 memcpy(pd + i, &val, 4);
6734         }
6735         eeprom->len += i;
6736
6737         if (len & 3) {
6738                 /* read last bytes not ending on 4 byte boundary */
6739                 pd = &data[eeprom->len];
6740                 b_count = len & 3;
6741                 b_offset = offset + len - b_count;
6742                 ret = tg3_nvram_read(tp, b_offset, &val);
6743                 if (ret)
6744                         return ret;
6745                 val = cpu_to_le32(val);
6746                 memcpy(pd, ((char*)&val), b_count);
6747                 eeprom->len += b_count;
6748         }
6749         return 0;
6750 }
6751
6752 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6753
6754 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6755 {
6756         struct tg3 *tp = netdev_priv(dev);
6757         int ret;
6758         u32 offset, len, b_offset, odd_len, start, end;
6759         u8 *buf;
6760
6761         if (eeprom->magic != TG3_EEPROM_MAGIC)
6762                 return -EINVAL;
6763
6764         offset = eeprom->offset;
6765         len = eeprom->len;
6766
6767         if ((b_offset = (offset & 3))) {
6768                 /* adjustments to start on required 4 byte boundary */
6769                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6770                 if (ret)
6771                         return ret;
6772                 start = cpu_to_le32(start);
6773                 len += b_offset;
6774                 offset &= ~3;
6775                 if (len < 4)
6776                         len = 4;
6777         }
6778
6779         odd_len = 0;
6780         if (len & 3) {
6781                 /* adjustments to end on required 4 byte boundary */
6782                 odd_len = 1;
6783                 len = (len + 3) & ~3;
6784                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6785                 if (ret)
6786                         return ret;
6787                 end = cpu_to_le32(end);
6788         }
6789
6790         buf = data;
6791         if (b_offset || odd_len) {
6792                 buf = kmalloc(len, GFP_KERNEL);
6793                 if (!buf)
6794                         return -ENOMEM;
6795                 if (b_offset)
6796                         memcpy(buf, &start, 4);
6797                 if (odd_len)
6798                         memcpy(buf+len-4, &end, 4);
6799                 memcpy(buf + b_offset, data, eeprom->len);
6800         }
6801
6802         ret = tg3_nvram_write_block(tp, offset, len, buf);
6803
6804         if (buf != data)
6805                 kfree(buf);
6806
6807         return ret;
6808 }
6809
6810 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6811 {
6812         struct tg3 *tp = netdev_priv(dev);
6813   
6814         cmd->supported = (SUPPORTED_Autoneg);
6815
6816         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6817                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6818                                    SUPPORTED_1000baseT_Full);
6819
6820         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6821                 cmd->supported |= (SUPPORTED_100baseT_Half |
6822                                   SUPPORTED_100baseT_Full |
6823                                   SUPPORTED_10baseT_Half |
6824                                   SUPPORTED_10baseT_Full |
6825                                   SUPPORTED_MII);
6826         else
6827                 cmd->supported |= SUPPORTED_FIBRE;
6828   
6829         cmd->advertising = tp->link_config.advertising;
6830         if (netif_running(dev)) {
6831                 cmd->speed = tp->link_config.active_speed;
6832                 cmd->duplex = tp->link_config.active_duplex;
6833         }
6834         cmd->port = 0;
6835         cmd->phy_address = PHY_ADDR;
6836         cmd->transceiver = 0;
6837         cmd->autoneg = tp->link_config.autoneg;
6838         cmd->maxtxpkt = 0;
6839         cmd->maxrxpkt = 0;
6840         return 0;
6841 }
6842   
6843 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6844 {
6845         struct tg3 *tp = netdev_priv(dev);
6846   
6847         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6848                 /* These are the only valid advertisement bits allowed.  */
6849                 if (cmd->autoneg == AUTONEG_ENABLE &&
6850                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6851                                           ADVERTISED_1000baseT_Full |
6852                                           ADVERTISED_Autoneg |
6853                                           ADVERTISED_FIBRE)))
6854                         return -EINVAL;
6855         }
6856
6857         spin_lock_irq(&tp->lock);
6858         spin_lock(&tp->tx_lock);
6859
6860         tp->link_config.autoneg = cmd->autoneg;
6861         if (cmd->autoneg == AUTONEG_ENABLE) {
6862                 tp->link_config.advertising = cmd->advertising;
6863                 tp->link_config.speed = SPEED_INVALID;
6864                 tp->link_config.duplex = DUPLEX_INVALID;
6865         } else {
6866                 tp->link_config.advertising = 0;
6867                 tp->link_config.speed = cmd->speed;
6868                 tp->link_config.duplex = cmd->duplex;
6869         }
6870   
6871         if (netif_running(dev))
6872                 tg3_setup_phy(tp, 1);
6873
6874         spin_unlock(&tp->tx_lock);
6875         spin_unlock_irq(&tp->lock);
6876   
6877         return 0;
6878 }
6879   
6880 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6881 {
6882         struct tg3 *tp = netdev_priv(dev);
6883   
6884         strcpy(info->driver, DRV_MODULE_NAME);
6885         strcpy(info->version, DRV_MODULE_VERSION);
6886         strcpy(info->bus_info, pci_name(tp->pdev));
6887 }
6888   
6889 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6890 {
6891         struct tg3 *tp = netdev_priv(dev);
6892   
6893         wol->supported = WAKE_MAGIC;
6894         wol->wolopts = 0;
6895         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6896                 wol->wolopts = WAKE_MAGIC;
6897         memset(&wol->sopass, 0, sizeof(wol->sopass));
6898 }
6899   
6900 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6901 {
6902         struct tg3 *tp = netdev_priv(dev);
6903   
6904         if (wol->wolopts & ~WAKE_MAGIC)
6905                 return -EINVAL;
6906         if ((wol->wolopts & WAKE_MAGIC) &&
6907             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6908             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6909                 return -EINVAL;
6910   
6911         spin_lock_irq(&tp->lock);
6912         if (wol->wolopts & WAKE_MAGIC)
6913                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6914         else
6915                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6916         spin_unlock_irq(&tp->lock);
6917   
6918         return 0;
6919 }
6920   
6921 static u32 tg3_get_msglevel(struct net_device *dev)
6922 {
6923         struct tg3 *tp = netdev_priv(dev);
6924         return tp->msg_enable;
6925 }
6926   
6927 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6928 {
6929         struct tg3 *tp = netdev_priv(dev);
6930         tp->msg_enable = value;
6931 }
6932   
6933 #if TG3_TSO_SUPPORT != 0
6934 static int tg3_set_tso(struct net_device *dev, u32 value)
6935 {
6936         struct tg3 *tp = netdev_priv(dev);
6937
6938         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6939                 if (value)
6940                         return -EINVAL;
6941                 return 0;
6942         }
6943         return ethtool_op_set_tso(dev, value);
6944 }
6945 #endif
6946   
6947 static int tg3_nway_reset(struct net_device *dev)
6948 {
6949         struct tg3 *tp = netdev_priv(dev);
6950         u32 bmcr;
6951         int r;
6952   
6953         if (!netif_running(dev))
6954                 return -EAGAIN;
6955
6956         spin_lock_irq(&tp->lock);
6957         r = -EINVAL;
6958         tg3_readphy(tp, MII_BMCR, &bmcr);
6959         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6960             (bmcr & BMCR_ANENABLE)) {
6961                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6962                 r = 0;
6963         }
6964         spin_unlock_irq(&tp->lock);
6965   
6966         return r;
6967 }
6968   
6969 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6970 {
6971         struct tg3 *tp = netdev_priv(dev);
6972   
6973         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6974         ering->rx_mini_max_pending = 0;
6975         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6976
6977         ering->rx_pending = tp->rx_pending;
6978         ering->rx_mini_pending = 0;
6979         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6980         ering->tx_pending = tp->tx_pending;
6981 }
6982   
6983 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6984 {
6985         struct tg3 *tp = netdev_priv(dev);
6986   
6987         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6988             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6989             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6990                 return -EINVAL;
6991   
6992         if (netif_running(dev))
6993                 tg3_netif_stop(tp);
6994
6995         spin_lock_irq(&tp->lock);
6996         spin_lock(&tp->tx_lock);
6997   
6998         tp->rx_pending = ering->rx_pending;
6999
7000         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7001             tp->rx_pending > 63)
7002                 tp->rx_pending = 63;
7003         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7004         tp->tx_pending = ering->tx_pending;
7005
7006         if (netif_running(dev)) {
7007                 tg3_halt(tp);
7008                 tg3_init_hw(tp);
7009                 tg3_netif_start(tp);
7010         }
7011
7012         spin_unlock(&tp->tx_lock);
7013         spin_unlock_irq(&tp->lock);
7014   
7015         return 0;
7016 }
7017   
7018 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7019 {
7020         struct tg3 *tp = netdev_priv(dev);
7021   
7022         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7023         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7024         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7025 }
7026   
7027 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7028 {
7029         struct tg3 *tp = netdev_priv(dev);
7030   
7031         if (netif_running(dev))
7032                 tg3_netif_stop(tp);
7033
7034         spin_lock_irq(&tp->lock);
7035         spin_lock(&tp->tx_lock);
7036         if (epause->autoneg)
7037                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7038         else
7039                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7040         if (epause->rx_pause)
7041                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7042         else
7043                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7044         if (epause->tx_pause)
7045                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7046         else
7047                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7048
7049         if (netif_running(dev)) {
7050                 tg3_halt(tp);
7051                 tg3_init_hw(tp);
7052                 tg3_netif_start(tp);
7053         }
7054         spin_unlock(&tp->tx_lock);
7055         spin_unlock_irq(&tp->lock);
7056   
7057         return 0;
7058 }
7059   
7060 static u32 tg3_get_rx_csum(struct net_device *dev)
7061 {
7062         struct tg3 *tp = netdev_priv(dev);
7063         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7064 }
7065   
7066 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7067 {
7068         struct tg3 *tp = netdev_priv(dev);
7069   
7070         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7071                 if (data != 0)
7072                         return -EINVAL;
7073                 return 0;
7074         }
7075   
7076         spin_lock_irq(&tp->lock);
7077         if (data)
7078                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7079         else
7080                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7081         spin_unlock_irq(&tp->lock);
7082   
7083         return 0;
7084 }
7085   
7086 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7087 {
7088         struct tg3 *tp = netdev_priv(dev);
7089   
7090         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7091                 if (data != 0)
7092                         return -EINVAL;
7093                 return 0;
7094         }
7095   
7096         if (data)
7097                 dev->features |= NETIF_F_IP_CSUM;
7098         else
7099                 dev->features &= ~NETIF_F_IP_CSUM;
7100
7101         return 0;
7102 }
7103
7104 static int tg3_get_stats_count (struct net_device *dev)
7105 {
7106         return TG3_NUM_STATS;
7107 }
7108
7109 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7110 {
7111         switch (stringset) {
7112         case ETH_SS_STATS:
7113                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7114                 break;
7115         default:
7116                 WARN_ON(1);     /* we need a WARN() */
7117                 break;
7118         }
7119 }
7120
7121 static void tg3_get_ethtool_stats (struct net_device *dev,
7122                                    struct ethtool_stats *estats, u64 *tmp_stats)
7123 {
7124         struct tg3 *tp = netdev_priv(dev);
7125         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7126 }
7127
7128 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7129 {
7130         struct mii_ioctl_data *data = if_mii(ifr);
7131         struct tg3 *tp = netdev_priv(dev);
7132         int err;
7133
7134         switch(cmd) {
7135         case SIOCGMIIPHY:
7136                 data->phy_id = PHY_ADDR;
7137
7138                 /* fallthru */
7139         case SIOCGMIIREG: {
7140                 u32 mii_regval;
7141
7142                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7143                         break;                  /* We have no PHY */
7144
7145                 spin_lock_irq(&tp->lock);
7146                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7147                 spin_unlock_irq(&tp->lock);
7148
7149                 data->val_out = mii_regval;
7150
7151                 return err;
7152         }
7153
7154         case SIOCSMIIREG:
7155                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7156                         break;                  /* We have no PHY */
7157
7158                 if (!capable(CAP_NET_ADMIN))
7159                         return -EPERM;
7160
7161                 spin_lock_irq(&tp->lock);
7162                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7163                 spin_unlock_irq(&tp->lock);
7164
7165                 return err;
7166
7167         default:
7168                 /* do nothing */
7169                 break;
7170         }
7171         return -EOPNOTSUPP;
7172 }
7173
7174 #if TG3_VLAN_TAG_USED
7175 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7176 {
7177         struct tg3 *tp = netdev_priv(dev);
7178
7179         spin_lock_irq(&tp->lock);
7180         spin_lock(&tp->tx_lock);
7181
7182         tp->vlgrp = grp;
7183
7184         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7185         __tg3_set_rx_mode(dev);
7186
7187         spin_unlock(&tp->tx_lock);
7188         spin_unlock_irq(&tp->lock);
7189 }
7190
7191 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7192 {
7193         struct tg3 *tp = netdev_priv(dev);
7194
7195         spin_lock_irq(&tp->lock);
7196         spin_lock(&tp->tx_lock);
7197         if (tp->vlgrp)
7198                 tp->vlgrp->vlan_devices[vid] = NULL;
7199         spin_unlock(&tp->tx_lock);
7200         spin_unlock_irq(&tp->lock);
7201 }
7202 #endif
7203
7204 static struct ethtool_ops tg3_ethtool_ops = {
7205         .get_settings           = tg3_get_settings,
7206         .set_settings           = tg3_set_settings,
7207         .get_drvinfo            = tg3_get_drvinfo,
7208         .get_regs_len           = tg3_get_regs_len,
7209         .get_regs               = tg3_get_regs,
7210         .get_wol                = tg3_get_wol,
7211         .set_wol                = tg3_set_wol,
7212         .get_msglevel           = tg3_get_msglevel,
7213         .set_msglevel           = tg3_set_msglevel,
7214         .nway_reset             = tg3_nway_reset,
7215         .get_link               = ethtool_op_get_link,
7216         .get_eeprom_len         = tg3_get_eeprom_len,
7217         .get_eeprom             = tg3_get_eeprom,
7218         .set_eeprom             = tg3_set_eeprom,
7219         .get_ringparam          = tg3_get_ringparam,
7220         .set_ringparam          = tg3_set_ringparam,
7221         .get_pauseparam         = tg3_get_pauseparam,
7222         .set_pauseparam         = tg3_set_pauseparam,
7223         .get_rx_csum            = tg3_get_rx_csum,
7224         .set_rx_csum            = tg3_set_rx_csum,
7225         .get_tx_csum            = ethtool_op_get_tx_csum,
7226         .set_tx_csum            = tg3_set_tx_csum,
7227         .get_sg                 = ethtool_op_get_sg,
7228         .set_sg                 = ethtool_op_set_sg,
7229 #if TG3_TSO_SUPPORT != 0
7230         .get_tso                = ethtool_op_get_tso,
7231         .set_tso                = tg3_set_tso,
7232 #endif
7233         .get_strings            = tg3_get_strings,
7234         .get_stats_count        = tg3_get_stats_count,
7235         .get_ethtool_stats      = tg3_get_ethtool_stats,
7236 };
7237
7238 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7239 {
7240         u32 cursize, val;
7241
7242         tp->nvram_size = EEPROM_CHIP_SIZE;
7243
7244         if (tg3_nvram_read(tp, 0, &val) != 0)
7245                 return;
7246
7247         if (swab32(val) != TG3_EEPROM_MAGIC)
7248                 return;
7249
7250         /*
7251          * Size the chip by reading offsets at increasing powers of two.
7252          * When we encounter our validation signature, we know the addressing
7253          * has wrapped around, and thus have our chip size.
7254          */
7255         cursize = 0x800;
7256
7257         while (cursize < tp->nvram_size) {
7258                 if (tg3_nvram_read(tp, cursize, &val) != 0)
7259                         return;
7260
7261                 if (swab32(val) == TG3_EEPROM_MAGIC)
7262                         break;
7263
7264                 cursize <<= 1;
7265         }
7266
7267         tp->nvram_size = cursize;
7268 }
7269                 
7270 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7271 {
7272         u32 val;
7273
7274         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7275                 if (val != 0) {
7276                         tp->nvram_size = (val >> 16) * 1024;
7277                         return;
7278                 }
7279         }
7280         tp->nvram_size = 0x20000;
7281 }
7282
7283 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7284 {
7285         u32 nvcfg1;
7286
7287         nvcfg1 = tr32(NVRAM_CFG1);
7288         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7289                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7290         }
7291         else {
7292                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7293                 tw32(NVRAM_CFG1, nvcfg1);
7294         }
7295
7296         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7297                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7298                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7299                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7300                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7301                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7302                                 break;
7303                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7304                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7305                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7306                                 break;
7307                         case FLASH_VENDOR_ATMEL_EEPROM:
7308                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7309                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7310                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7311                                 break;
7312                         case FLASH_VENDOR_ST:
7313                                 tp->nvram_jedecnum = JEDEC_ST;
7314                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7315                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7316                                 break;
7317                         case FLASH_VENDOR_SAIFUN:
7318                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7319                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7320                                 break;
7321                         case FLASH_VENDOR_SST_SMALL:
7322                         case FLASH_VENDOR_SST_LARGE:
7323                                 tp->nvram_jedecnum = JEDEC_SST;
7324                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7325                                 break;
7326                 }
7327         }
7328         else {
7329                 tp->nvram_jedecnum = JEDEC_ATMEL;
7330                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7331                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7332         }
7333 }
7334
7335 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
7336 {
7337         u32 nvcfg1;
7338
7339         nvcfg1 = tr32(NVRAM_CFG1);
7340
7341         /* NVRAM protection for TPM */
7342         if (nvcfg1 & (1 << 27))
7343                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
7344
7345         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
7346                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
7347                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
7348                         tp->nvram_jedecnum = JEDEC_ATMEL;
7349                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7350                         break;
7351                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
7352                         tp->nvram_jedecnum = JEDEC_ATMEL;
7353                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7354                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7355                         break;
7356                 case FLASH_5752VENDOR_ST_M45PE10:
7357                 case FLASH_5752VENDOR_ST_M45PE20:
7358                 case FLASH_5752VENDOR_ST_M45PE40:
7359                         tp->nvram_jedecnum = JEDEC_ST;
7360                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7361                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7362                         break;
7363         }
7364
7365         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
7366                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
7367                         case FLASH_5752PAGE_SIZE_256:
7368                                 tp->nvram_pagesize = 256;
7369                                 break;
7370                         case FLASH_5752PAGE_SIZE_512:
7371                                 tp->nvram_pagesize = 512;
7372                                 break;
7373                         case FLASH_5752PAGE_SIZE_1K:
7374                                 tp->nvram_pagesize = 1024;
7375                                 break;
7376                         case FLASH_5752PAGE_SIZE_2K:
7377                                 tp->nvram_pagesize = 2048;
7378                                 break;
7379                         case FLASH_5752PAGE_SIZE_4K:
7380                                 tp->nvram_pagesize = 4096;
7381                                 break;
7382                         case FLASH_5752PAGE_SIZE_264:
7383                                 tp->nvram_pagesize = 264;
7384                                 break;
7385                 }
7386         }
7387         else {
7388                 /* For eeprom, set pagesize to maximum eeprom size */
7389                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7390
7391                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7392                 tw32(NVRAM_CFG1, nvcfg1);
7393         }
7394 }
7395
7396 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7397 static void __devinit tg3_nvram_init(struct tg3 *tp)
7398 {
7399         int j;
7400
7401         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7402                 return;
7403
7404         tw32_f(GRC_EEPROM_ADDR,
7405              (EEPROM_ADDR_FSM_RESET |
7406               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7407                EEPROM_ADDR_CLKPERD_SHIFT)));
7408
7409         /* XXX schedule_timeout() ... */
7410         for (j = 0; j < 100; j++)
7411                 udelay(10);
7412
7413         /* Enable seeprom accesses. */
7414         tw32_f(GRC_LOCAL_CTRL,
7415              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7416         udelay(100);
7417
7418         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7419             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7420                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7421
7422                 tg3_enable_nvram_access(tp);
7423
7424                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7425                         tg3_get_5752_nvram_info(tp);
7426                 else
7427                         tg3_get_nvram_info(tp);
7428
7429                 tg3_get_nvram_size(tp);
7430
7431                 tg3_disable_nvram_access(tp);
7432
7433         } else {
7434                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7435
7436                 tg3_get_eeprom_size(tp);
7437         }
7438 }
7439
7440 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7441                                         u32 offset, u32 *val)
7442 {
7443         u32 tmp;
7444         int i;
7445
7446         if (offset > EEPROM_ADDR_ADDR_MASK ||
7447             (offset % 4) != 0)
7448                 return -EINVAL;
7449
7450         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7451                                         EEPROM_ADDR_DEVID_MASK |
7452                                         EEPROM_ADDR_READ);
7453         tw32(GRC_EEPROM_ADDR,
7454              tmp |
7455              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7456              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7457               EEPROM_ADDR_ADDR_MASK) |
7458              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7459
7460         for (i = 0; i < 10000; i++) {
7461                 tmp = tr32(GRC_EEPROM_ADDR);
7462
7463                 if (tmp & EEPROM_ADDR_COMPLETE)
7464                         break;
7465                 udelay(100);
7466         }
7467         if (!(tmp & EEPROM_ADDR_COMPLETE))
7468                 return -EBUSY;
7469
7470         *val = tr32(GRC_EEPROM_DATA);
7471         return 0;
7472 }
7473
7474 #define NVRAM_CMD_TIMEOUT 10000
7475
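     /* Issue an NVRAM command and poll for NVRAM_CMD_DONE; at 10 usec per
      * iteration the timeout works out to roughly 100 msec.
      */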
7476 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7477 {
7478         int i;
7479
7480         tw32(NVRAM_CMD, nvram_cmd);
7481         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7482                 udelay(10);
7483                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7484                         udelay(10);
7485                         break;
7486                 }
7487         }
7488         if (i == NVRAM_CMD_TIMEOUT) {
7489                 return -EBUSY;
7490         }
7491         return 0;
7492 }
7493
7494 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7495 {
7496         int ret;
7497
7498         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7499                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7500                 return -EINVAL;
7501         }
7502
7503         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7504                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7505
7506         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7507                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7508                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7509
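                 /* Buffered Atmel AT45DB flash is addressed by page: split the
                  * linear offset into a page number (shifted into the page
                  * position field) plus the byte offset within that page.
                  */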
7510                 offset = ((offset / tp->nvram_pagesize) <<
7511                           ATMEL_AT45DB0X1B_PAGE_POS) +
7512                         (offset % tp->nvram_pagesize);
7513         }
7514
7515         if (offset > NVRAM_ADDR_MSK)
7516                 return -EINVAL;
7517
7518         tg3_nvram_lock(tp);
7519
7520         tg3_enable_nvram_access(tp);
7521
7522         tw32(NVRAM_ADDR, offset);
7523         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7524                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7525
7526         if (ret == 0)
7527                 *val = swab32(tr32(NVRAM_RDDATA));
7528
7529         tg3_nvram_unlock(tp);
7530
7531         tg3_disable_nvram_access(tp);
7532
7533         return ret;
7534 }
7535
7536 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7537                                     u32 offset, u32 len, u8 *buf)
7538 {
7539         int i, j, rc = 0;
7540         u32 val;
7541
7542         for (i = 0; i < len; i += 4) {
7543                 u32 addr, data;
7544
7545                 addr = offset + i;
7546
7547                 memcpy(&data, buf + i, 4);
7548
7549                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7550
7551                 val = tr32(GRC_EEPROM_ADDR);
7552                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7553
7554                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7555                         EEPROM_ADDR_READ);
7556                 tw32(GRC_EEPROM_ADDR, val |
7557                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7558                         (addr & EEPROM_ADDR_ADDR_MASK) |
7559                         EEPROM_ADDR_START |
7560                         EEPROM_ADDR_WRITE);
7561                 
7562                 for (j = 0; j < 10000; j++) {
7563                         val = tr32(GRC_EEPROM_ADDR);
7564
7565                         if (val & EEPROM_ADDR_COMPLETE)
7566                                 break;
7567                         udelay(100);
7568                 }
7569                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7570                         rc = -EBUSY;
7571                         break;
7572                 }
7573         }
7574
7575         return rc;
7576 }
7577
7578 /* offset and length are dword aligned */
7579 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7580                 u8 *buf)
7581 {
7582         int ret = 0;
7583         u32 pagesize = tp->nvram_pagesize;
7584         u32 pagemask = pagesize - 1;
7585         u32 nvram_cmd;
7586         u8 *tmp;
7587
7588         tmp = kmalloc(pagesize, GFP_KERNEL);
7589         if (tmp == NULL)
7590                 return -ENOMEM;
7591
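         /* Unbuffered flash can only be erased and programmed one page at a
          * time, so each page touched by the request is read into tmp, the
          * new bytes merged in, and the whole page erased and rewritten.
          */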
7592         while (len) {
7593                 int j;
7594                 u32 phy_addr, page_off, size;
7595
7596                 phy_addr = offset & ~pagemask;
7597         
7598                 for (j = 0; j < pagesize; j += 4) {
7599                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7600                                                 (u32 *) (tmp + j))))
7601                                 break;
7602                 }
7603                 if (ret)
7604                         break;
7605
7606                 page_off = offset & pagemask;
7607                 size = pagesize;
7608                 if (len < size)
7609                         size = len;
7610
7611                 len -= size;
7612
7613                 memcpy(tmp + page_off, buf, size);
7614
7615                 offset = offset + (pagesize - page_off);
7616
7617                 tg3_enable_nvram_access(tp);
7618
7619                 /*
7620                  * Before we can erase the flash page, we need
7621                  * to issue a special "write enable" command.
7622                  */
7623                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7624
7625                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7626                         break;
7627
7628                 /* Erase the target page */
7629                 tw32(NVRAM_ADDR, phy_addr);
7630
7631                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7632                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7633
7634                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7635                         break;
7636
7637                 /* Issue another write enable to start the write. */
7638                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7639
7640                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7641                         break;
7642
7643                 for (j = 0; j < pagesize; j += 4) {
7644                         u32 data;
7645
7646                         data = *((u32 *) (tmp + j));
7647                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7648
7649                         tw32(NVRAM_ADDR, phy_addr + j);
7650
7651                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7652                                 NVRAM_CMD_WR;
7653
7654                         if (j == 0)
7655                                 nvram_cmd |= NVRAM_CMD_FIRST;
7656                         else if (j == (pagesize - 4))
7657                                 nvram_cmd |= NVRAM_CMD_LAST;
7658
7659                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7660                                 break;
7661                 }
7662                 if (ret)
7663                         break;
7664         }
7665
7666         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7667         tg3_nvram_exec_cmd(tp, nvram_cmd);
7668
7669         kfree(tmp);
7670
7671         return ret;
7672 }
7673
7674 /* offset and length are dword aligned */
7675 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7676                 u8 *buf)
7677 {
7678         int i, ret = 0;
7679
7680         for (i = 0; i < len; i += 4, offset += 4) {
7681                 u32 data, page_off, phy_addr, nvram_cmd;
7682
7683                 memcpy(&data, buf + i, 4);
7684                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7685
7686                 page_off = offset % tp->nvram_pagesize;
7687
7688                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7689                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7690
7691                         phy_addr = ((offset / tp->nvram_pagesize) <<
7692                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7693                 }
7694                 else {
7695                         phy_addr = offset;
7696                 }
7697
7698                 tw32(NVRAM_ADDR, phy_addr);
7699
7700                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7701
7702                 if ((page_off == 0) || (i == 0))
7703                         nvram_cmd |= NVRAM_CMD_FIRST;
7704                 else if (page_off == (tp->nvram_pagesize - 4))
7705                         nvram_cmd |= NVRAM_CMD_LAST;
7706
7707                 if (i == (len - 4))
7708                         nvram_cmd |= NVRAM_CMD_LAST;
7709
7710                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7711                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7712
7713                         if ((ret = tg3_nvram_exec_cmd(tp,
7714                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7715                                 NVRAM_CMD_DONE)))
7716
7717                                 break;
7718                 }
7719                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7720                         /* We always do complete word writes to eeprom. */
7721                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7722                 }
7723
7724                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7725                         break;
7726         }
7727         return ret;
7728 }
7729
7730 /* offset and length are dword aligned */
7731 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7732 {
7733         int ret;
7734
7735         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7736                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7737                 return -EINVAL;
7738         }
7739
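         /* Boards flagged EEPROM_WRITE_PROT have their write protection
          * released by dropping GPIO output 1 for the duration of the write;
          * the saved grc_local_ctrl is restored at the end of this function.
          */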
7740         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7741                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
7742                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
7743                 udelay(40);
7744         }
7745
7746         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7747                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7748         }
7749         else {
7750                 u32 grc_mode;
7751
7752                 tg3_nvram_lock(tp);
7753
7754                 tg3_enable_nvram_access(tp);
7755                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
7756                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
7757                         tw32(NVRAM_WRITE1, 0x406);
7758
7759                 grc_mode = tr32(GRC_MODE);
7760                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7761
7762                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7763                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7764
7765                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7766                                 buf);
7767                 }
7768                 else {
7769                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7770                                 buf);
7771                 }
7772
7773                 grc_mode = tr32(GRC_MODE);
7774                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7775
7776                 tg3_disable_nvram_access(tp);
7777                 tg3_nvram_unlock(tp);
7778         }
7779
7780         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7781                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7782                 udelay(40);
7783         }
7784
7785         return ret;
7786 }
7787
7788 struct subsys_tbl_ent {
7789         u16 subsys_vendor, subsys_devid;
7790         u32 phy_id;
7791 };
7792
7793 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7794         /* Broadcom boards. */
7795         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7796         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7797         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7798         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7799         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7800         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7801         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7802         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7803         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7804         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7805         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7806
7807         /* 3com boards. */
7808         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7809         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7810         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7811         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7812         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7813
7814         /* DELL boards. */
7815         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7816         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7817         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7818         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7819
7820         /* Compaq boards. */
7821         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7822         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7823         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7824         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7825         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7826
7827         /* IBM boards. */
7828         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7829 };
7830
7831 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7832 {
7833         int i;
7834
7835         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7836                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7837                      tp->pdev->subsystem_vendor) &&
7838                     (subsys_id_to_phy_id[i].subsys_devid ==
7839                      tp->pdev->subsystem_device))
7840                         return &subsys_id_to_phy_id[i];
7841         }
7842         return NULL;
7843 }
7844
7845 /* Since this function may be called in D3-hot power state during
7846  * tg3_init_one(), only config cycles are allowed.
7847  */
7848 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
7849 {
7850         u32 val;
7851
7852         /* Make sure register accesses (indirect or otherwise)
7853          * will function correctly.
7854          */
7855         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7856                                tp->misc_host_ctrl);
7857
7858         tp->phy_id = PHY_ID_INVALID;
7859         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7860
7861         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7862         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7863                 u32 nic_cfg, led_cfg;
7864                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
7865                 int eeprom_phy_serdes = 0;
7866
7867                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7868                 tp->nic_sram_data_cfg = nic_cfg;
7869
7870                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7871                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7872                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7873                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7874                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7875                     (ver > 0) && (ver < 0x100))
7876                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7877
7878                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7879                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7880                         eeprom_phy_serdes = 1;
7881
7882                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7883                 if (nic_phy_id != 0) {
7884                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7885                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7886
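                             /* Repack the two NIC SRAM PHY ID words into the
                              * driver's internal PHY ID layout, the same format
                              * tg3_phy_probe() builds from MII_PHYSID1/MII_PHYSID2.
                              */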
7887                         eeprom_phy_id  = (id1 >> 16) << 10;
7888                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7889                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7890                 } else
7891                         eeprom_phy_id = 0;
7892
7893                 tp->phy_id = eeprom_phy_id;
7894                 if (eeprom_phy_serdes)
7895                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7896
7897                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7898                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7899                                     SHASTA_EXT_LED_MODE_MASK);
7900                 else
7901                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7902
7903                 switch (led_cfg) {
7904                 default:
7905                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7906                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7907                         break;
7908
7909                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7910                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7911                         break;
7912
7913                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7914                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7915                         break;
7916
7917                 case SHASTA_EXT_LED_SHARED:
7918                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7919                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7920                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7921                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7922                                                  LED_CTRL_MODE_PHY_2);
7923                         break;
7924
7925                 case SHASTA_EXT_LED_MAC:
7926                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7927                         break;
7928
7929                 case SHASTA_EXT_LED_COMBO:
7930                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7931                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7932                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7933                                                  LED_CTRL_MODE_PHY_2);
7934                         break;
7935
7936                 }
7937
7938                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7939                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7940                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7941                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7942
7943                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7944                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7945                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7946                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7947
7948                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7949                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7950                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7951                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7952                 }
7953                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7954                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7955
7956                 if (cfg2 & (1 << 17))
7957                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7958
7959                 /* serdes signal pre-emphasis in register 0x590 set by
7960                  * bootcode if bit 18 is set */
7961                 if (cfg2 & (1 << 18))
7962                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7963         }
7964 }
7965
7966 static int __devinit tg3_phy_probe(struct tg3 *tp)
7967 {
7968         u32 hw_phy_id_1, hw_phy_id_2;
7969         u32 hw_phy_id, hw_phy_id_masked;
7970         int err;
7971
7972         /* Reading the PHY ID register can conflict with ASF
7973          * firmware access to the PHY hardware.
7974          */
7975         err = 0;
7976         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7977                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7978         } else {
7979                 /* Now read the physical PHY_ID from the chip and verify
7980                  * that it is sane.  If it doesn't look good, we fall back
7981                  * to the PHY_ID obtained from the eeprom area, and failing
7982                  * that, to the hard-coded subsystem-ID table below.
7983                  */
7984                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7985                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7986
7987                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7988                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7989                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7990
7991                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7992         }
7993
7994         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7995                 tp->phy_id = hw_phy_id;
7996                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7997                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7998         } else {
7999                 if (tp->phy_id != PHY_ID_INVALID) {
8000                         /* Do nothing, phy ID already set up in
8001                          * tg3_get_eeprom_hw_cfg().
8002                          */
8003                 } else {
8004                         struct subsys_tbl_ent *p;
8005
8006                         /* No eeprom signature?  Try the hardcoded
8007                          * subsys device table.
8008                          */
8009                         p = lookup_by_subsys(tp);
8010                         if (!p)
8011                                 return -ENODEV;
8012
8013                         tp->phy_id = p->phy_id;
8014                         if (!tp->phy_id ||
8015                             tp->phy_id == PHY_ID_BCM8002)
8016                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8017                 }
8018         }
8019
8020         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8021             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8022                 u32 bmsr, adv_reg, tg3_ctrl;
8023
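                     /* BMSR_LSTATUS is latched low, so read BMSR twice; the
                      * second read reflects the current link state.
                      */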
8024                 tg3_readphy(tp, MII_BMSR, &bmsr);
8025                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8026                     (bmsr & BMSR_LSTATUS))
8027                         goto skip_phy_reset;
8028
8029                 err = tg3_phy_reset(tp);
8030                 if (err)
8031                         return err;
8032
8033                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
8034                            ADVERTISE_100HALF | ADVERTISE_100FULL |
8035                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
8036                 tg3_ctrl = 0;
8037                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
8038                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
8039                                     MII_TG3_CTRL_ADV_1000_FULL);
8040                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8041                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
8042                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
8043                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
8044                 }
8045
8046                 if (!tg3_copper_is_advertising_all(tp)) {
8047                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8048
8049                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8050                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8051
8052                         tg3_writephy(tp, MII_BMCR,
8053                                      BMCR_ANENABLE | BMCR_ANRESTART);
8054                 }
8055                 tg3_phy_set_wirespeed(tp);
8056
8057                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8058                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8059                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8060         }
8061
8062 skip_phy_reset:
8063         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8064                 err = tg3_init_5401phy_dsp(tp);
8065                 if (err)
8066                         return err;
8067         }
8068
8069         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
8070                 err = tg3_init_5401phy_dsp(tp);
8071         }
8072
8073         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8074                 tp->link_config.advertising =
8075                         (ADVERTISED_1000baseT_Half |
8076                          ADVERTISED_1000baseT_Full |
8077                          ADVERTISED_Autoneg |
8078                          ADVERTISED_FIBRE);
8079         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8080                 tp->link_config.advertising &=
8081                         ~(ADVERTISED_1000baseT_Half |
8082                           ADVERTISED_1000baseT_Full);
8083
8084         return err;
8085 }
8086
8087 static void __devinit tg3_read_partno(struct tg3 *tp)
8088 {
8089         unsigned char vpd_data[256];
8090         int i;
8091
8092         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8093                 /* Sun decided not to put the necessary bits in the
8094                  * NVRAM of their onboard tg3 parts :(
8095                  */
8096                 strcpy(tp->board_part_number, "Sun 570X");
8097                 return;
8098         }
8099
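             /* The VPD image starts at NVRAM offset 0x100; copy 256 bytes
              * of it into vpd_data[] one 32-bit word at a time.
              */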
8100         for (i = 0; i < 256; i += 4) {
8101                 u32 tmp;
8102
8103                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
8104                         goto out_not_found;
8105
8106                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
8107                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
8108                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
8109                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
8110         }
8111
8112         /* Now parse and find the part number. */
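             /* Per the PCI VPD format, 0x82 is the identifier-string tag and
              * 0x91 the read-write data block; both are skipped.  The "PN"
              * keyword lives in the 0x90 read-only data block.
              */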
8113         for (i = 0; i < 256; ) {
8114                 unsigned char val = vpd_data[i];
8115                 int block_end;
8116
8117                 if (val == 0x82 || val == 0x91) {
8118                         i = (i + 3 +
8119                              (vpd_data[i + 1] +
8120                               (vpd_data[i + 2] << 8)));
8121                         continue;
8122                 }
8123
8124                 if (val != 0x90)
8125                         goto out_not_found;
8126
8127                 block_end = (i + 3 +
8128                              (vpd_data[i + 1] +
8129                               (vpd_data[i + 2] << 8)));
8130                 i += 3;
8131                 while (i < block_end) {
8132                         if (vpd_data[i + 0] == 'P' &&
8133                             vpd_data[i + 1] == 'N') {
8134                                 int partno_len = vpd_data[i + 2];
8135
8136                                 if (partno_len > 24)
8137                                         goto out_not_found;
8138
8139                                 memcpy(tp->board_part_number,
8140                                        &vpd_data[i + 3],
8141                                        partno_len);
8142
8143                                 /* Success. */
8144                                 return;
8145                         }
                             /* Advance past this keyword (2-byte name, 1-byte
                              * length, then data) so the scan terminates. */
                             i += 3 + vpd_data[i + 2];
8146                 }
8147
8148                 /* Part number not found. */
8149                 goto out_not_found;
8150         }
8151
8152 out_not_found:
8153         strcpy(tp->board_part_number, "none");
8154 }
8155
8156 #ifdef CONFIG_SPARC64
8157 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
8158 {
8159         struct pci_dev *pdev = tp->pdev;
8160         struct pcidev_cookie *pcp = pdev->sysdata;
8161
8162         if (pcp != NULL) {
8163                 int node = pcp->prom_node;
8164                 u32 venid;
8165                 int err;
8166
8167                 err = prom_getproperty(node, "subsystem-vendor-id",
8168                                        (char *) &venid, sizeof(venid));
8169                 if (err == 0 || err == -1)
8170                         return 0;
8171                 if (venid == PCI_VENDOR_ID_SUN)
8172                         return 1;
8173         }
8174         return 0;
8175 }
8176 #endif
8177
8178 static int __devinit tg3_get_invariants(struct tg3 *tp)
8179 {
8180         static struct pci_device_id write_reorder_chipsets[] = {
8181                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8182                              PCI_DEVICE_ID_INTEL_82801AA_8) },
8183                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8184                              PCI_DEVICE_ID_INTEL_82801AB_8) },
8185                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8186                              PCI_DEVICE_ID_INTEL_82801BA_11) },
8187                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8188                              PCI_DEVICE_ID_INTEL_82801BA_6) },
8189                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
8190                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
8191                 { },
8192         };
8193         u32 misc_ctrl_reg;
8194         u32 cacheline_sz_reg;
8195         u32 pci_state_reg, grc_misc_cfg;
8196         u32 val;
8197         u16 pci_cmd;
8198         int err;
8199
8200 #ifdef CONFIG_SPARC64
8201         if (tg3_is_sun_570X(tp))
8202                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
8203 #endif
8204
8205         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
8206          * reordering to the mailbox registers done by the host
8207          * controller can cause major troubles.  We read back from
8208          * every mailbox register write to force the writes to be
8209          * posted to the chip in order.
8210          */
8211         if (pci_dev_present(write_reorder_chipsets))
8212                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
8213
8214         /* Force memory write invalidate off.  If we leave it on,
8215          * then on 5700_BX chips we have to enable a workaround.
8216          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
8217          * to match the cacheline size.  The Broadcom driver has this
8218          * workaround but turns MWI off all the time and so never uses
8219          * it.  This seems to suggest that the workaround is insufficient.
8220          */
8221         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8222         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
8223         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8224
8225         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
8226          * has the register indirect write enable bit set before
8227          * we try to access any of the MMIO registers.  It is also
8228          * critical that the PCI-X hw workaround situation is decided
8229          * before that.
8230          */
8231         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8232                               &misc_ctrl_reg);
8233
8234         tp->pci_chip_rev_id = (misc_ctrl_reg >>
8235                                MISC_HOST_CTRL_CHIPREV_SHIFT);
8236
8237         /* Wrong chip ID in 5752 A0. This code can be removed later
8238          * as A0 is not in production.
8239          */
8240         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
8241                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
8242
8243         /* Initialize misc host control in PCI block. */
8244         tp->misc_host_ctrl |= (misc_ctrl_reg &
8245                                MISC_HOST_CTRL_CHIPREV);
8246         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8247                                tp->misc_host_ctrl);
8248
8249         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8250                               &cacheline_sz_reg);
8251
8252         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
8253         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
8254         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
8255         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
8256
8257         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8258             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8259                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
8260
8261         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
8262             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
8263                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
8264
8265         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8266                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
8267
8268         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
8269                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
8270
8271         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8272             tp->pci_lat_timer < 64) {
8273                 tp->pci_lat_timer = 64;
8274
8275                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
8276                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
8277                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
8278                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
8279
8280                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8281                                        cacheline_sz_reg);
8282         }
8283
8284         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8285                               &pci_state_reg);
8286
8287         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
8288                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
8289
8290                 /* If this is a 5700 BX chipset, and we are in PCI-X
8291                  * mode, enable register write workaround.
8292                  *
8293                  * The workaround is to use indirect register accesses
8294                  * for all chip writes not to mailbox registers.
8295                  */
8296                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
8297                         u32 pm_reg;
8298                         u16 pci_cmd;
8299
8300                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8301
8302                         /* The chip can have its power management PCI config
8303                          * space registers clobbered due to this bug.
8304                          * So explicitly force the chip into D0 here.
8305                          */
8306                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8307                                               &pm_reg);
8308                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
8309                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
8310                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8311                                                pm_reg);
8312
8313                         /* Also, force SERR#/PERR# in PCI command. */
8314                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8315                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
8316                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8317                 }
8318         }
8319
8320         /* Back to back register writes can cause problems on this chip,
8321          * the workaround is to read back all reg writes except those to
8322          * mailbox regs.  See tg3_write_indirect_reg32().
8323          *
8324          * PCI Express 5750_A0 rev chips need this workaround too.
8325          */
8326         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8327             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8328              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8329                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8330
8331         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8332                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8333         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8334                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8335
8336         /* Chip-specific fixup from Broadcom driver */
8337         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8338             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8339                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8340                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8341         }
8342
8343         /* Get eeprom hw config before calling tg3_set_power_state().
8344          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
8345          * determined before calling tg3_set_power_state() so that
8346          * we know whether or not to switch out of Vaux power.
8347          * When the flag is set, it means that GPIO1 is used for eeprom
8348          * write protect and also implies that it is a LOM where GPIOs
8349          * are not used to switch power.
8350          */ 
8351         tg3_get_eeprom_hw_cfg(tp);
8352
8353         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
8354          * GPIO1 driven high will bring 5700's external PHY out of reset.
8355          * It is also used as eeprom write protect on LOMs.
8356          */
8357         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
8358         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8359             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
8360                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8361                                        GRC_LCLCTRL_GPIO_OUTPUT1);
8362         /* Unused GPIO3 must be driven as output on 5752 because there
8363          * are no pull-up resistors on unused GPIO pins.
8364          */
8365         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8366                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
8367
8368         /* Force the chip into D0. */
8369         err = tg3_set_power_state(tp, 0);
8370         if (err) {
8371                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8372                        pci_name(tp->pdev));
8373                 return err;
8374         }
8375
8376         /* 5700 B0 chips do not support checksumming correctly due
8377          * to hardware bugs.
8378          */
8379         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8380                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8381
8382         /* Pseudo-header checksum is done by hardware logic and not
8383          * the offload processors, so make the chip do the pseudo-
8384          * header checksums on receive.  For transmit it is more
8385          * convenient to do the pseudo-header checksum in software
8386          * as Linux does that on transmit for us in all cases.
8387          */
8388         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8389         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8390
8391         /* Derive initial jumbo mode from MTU assigned in
8392          * ether_setup() via the alloc_etherdev() call
8393          */
8394         if (tp->dev->mtu > ETH_DATA_LEN)
8395                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8396
8397         /* Determine WakeOnLan speed to use. */
8398         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8399             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8400             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8401             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8402                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8403         } else {
8404                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8405         }
8406
8407         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
8408         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8409             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8410              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8411              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8412                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8413
8414         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8415             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8416                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8417         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8418                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8419
8420         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8421                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8422
8423         /* Only 5701 and later support tagged irq status mode.
8424          * Also, 5788 chips cannot use tagged irq status.
8425          *
8426          * However, since we are using NAPI, avoid tagged irq status
8427          * because the interrupt condition is more difficult to
8428          * fully clear in that mode.
8429          */
8430         tp->coalesce_mode = 0;
8431
8432         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8433             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8434                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8435
8436         /* Initialize MAC MI mode, polling disabled. */
8437         tw32_f(MAC_MI_MODE, tp->mi_mode);
8438         udelay(80);
8439
8440         /* Initialize data/descriptor byte/word swapping. */
8441         val = tr32(GRC_MODE);
8442         val &= GRC_MODE_HOST_STACKUP;
8443         tw32(GRC_MODE, val | tp->grc_mode);
8444
8445         tg3_switch_clocks(tp);
8446
8447         /* Clear this out for sanity. */
8448         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8449
8450         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8451                               &pci_state_reg);
8452         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8453             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8454                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8455
8456                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8457                     chiprevid == CHIPREV_ID_5701_B0 ||
8458                     chiprevid == CHIPREV_ID_5701_B2 ||
8459                     chiprevid == CHIPREV_ID_5701_B5) {
8460                         void __iomem *sram_base;
8461
8462                         /* Write some dummy words into the SRAM status block
8463                          * area, see if it reads back correctly.  If the return
8464                          * value is bad, force enable the PCIX workaround.
8465                          */
8466                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8467
8468                         writel(0x00000000, sram_base);
8469                         writel(0x00000000, sram_base + 4);
8470                         writel(0xffffffff, sram_base + 4);
8471                         if (readl(sram_base) != 0x00000000)
8472                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8473                 }
8474         }
8475
8476         udelay(50);
8477         tg3_nvram_init(tp);
8478
8479         grc_misc_cfg = tr32(GRC_MISC_CFG);
8480         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8481
8482         /* Broadcom's driver says that CIOBE multisplit has a bug */
8483 #if 0
8484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8485             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8486                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8487                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8488         }
8489 #endif
8490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8491             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8492              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8493                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8494
8495         /* these are limited to 10/100 only */
8496         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8497              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8498             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8499              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8500              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8501               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8502               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8503             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8504              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8505               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8506                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8507
8508         err = tg3_phy_probe(tp);
8509         if (err) {
8510                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8511                        pci_name(tp->pdev), err);
8512                 /* ... but do not return immediately ... */
8513         }
8514
8515         tg3_read_partno(tp);
8516
8517         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8518                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8519         } else {
8520                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8521                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8522                 else
8523                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8524         }
8525
8526         /* 5700 {AX,BX} chips have a broken status block link
8527          * change bit implementation, so we must use the
8528          * status register in those cases.
8529          */
8530         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8531                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8532         else
8533                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8534
8535         /* The led_ctrl is set during tg3_phy_probe; here we might
8536          * have to force the link status polling mechanism based
8537          * upon subsystem IDs.
8538          */
8539         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8540             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8541                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8542                                   TG3_FLAG_USE_LINKCHG_REG);
8543         }
8544
8545         /* For all SERDES we poll the MAC status register. */
8546         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8547                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8548         else
8549                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8550
8551         /* 5700 BX chips need to have their TX producer index mailboxes
8552          * written twice to workaround a bug.
8553          */
8554         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8555                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8556         else
8557                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8558
8559         /* It seems all chips can get confused if TX buffers
8560          * straddle the 4GB address boundary in some cases.
8561          */
8562         tp->dev->hard_start_xmit = tg3_start_xmit;
8563
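             /* Reserve two bytes in front of the frame so the IP header ends
              * up 4-byte aligned; 5701 chips in PCI-X mode do not get the
              * offset (cleared just below).
              */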
8564         tp->rx_offset = 2;
8565         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8566             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8567                 tp->rx_offset = 0;
8568
8569         /* By default, disable wake-on-lan.  User can change this
8570          * using ETHTOOL_SWOL.
8571          */
8572         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8573
8574         return err;
8575 }
8576
8577 #ifdef CONFIG_SPARC64
8578 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8579 {
8580         struct net_device *dev = tp->dev;
8581         struct pci_dev *pdev = tp->pdev;
8582         struct pcidev_cookie *pcp = pdev->sysdata;
8583
8584         if (pcp != NULL) {
8585                 int node = pcp->prom_node;
8586
8587                 if (prom_getproplen(node, "local-mac-address") == 6) {
8588                         prom_getproperty(node, "local-mac-address",
8589                                          dev->dev_addr, 6);
8590                         return 0;
8591                 }
8592         }
8593         return -ENODEV;
8594 }
8595
8596 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8597 {
8598         struct net_device *dev = tp->dev;
8599
8600         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8601         return 0;
8602 }
8603 #endif
8604
8605 static int __devinit tg3_get_device_address(struct tg3 *tp)
8606 {
8607         struct net_device *dev = tp->dev;
8608         u32 hi, lo, mac_offset;
8609
8610 #ifdef CONFIG_SPARC64
8611         if (!tg3_get_macaddr_sparc(tp))
8612                 return 0;
8613 #endif
8614
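             /* The NVRAM copy of the MAC address normally lives at offset
              * 0x7c; the second port of a 5704 keeps its copy at 0xcc.
              */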
8615         mac_offset = 0x7c;
8616         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8617             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8618                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8619                         mac_offset = 0xcc;
8620                 if (tg3_nvram_lock(tp))
8621                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8622                 else
8623                         tg3_nvram_unlock(tp);
8624         }
8625
8626         /* First try to get it from MAC address mailbox. */
8627         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
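             /* 0x484b is ASCII "HK", the signature marking a valid MAC
              * address in the mailbox.
              */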
8628         if ((hi >> 16) == 0x484b) {
8629                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8630                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8631
8632                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8633                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8634                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8635                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8636                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8637         }
8638         /* Next, try NVRAM. */
8639         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8640                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8641                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8642                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8643                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8644                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8645                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8646                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8647                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8648         }
8649         /* Finally just fetch it out of the MAC control regs. */
8650         else {
8651                 hi = tr32(MAC_ADDR_0_HIGH);
8652                 lo = tr32(MAC_ADDR_0_LOW);
8653
8654                 dev->dev_addr[5] = lo & 0xff;
8655                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8656                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8657                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8658                 dev->dev_addr[1] = hi & 0xff;
8659                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8660         }
8661
8662         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8663 #ifdef CONFIG_SPARC64
8664                 if (!tg3_get_default_macaddr_sparc(tp))
8665                         return 0;
8666 #endif
8667                 return -EINVAL;
8668         }
8669         return 0;
8670 }
8671
8672 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8673 {
8674         struct tg3_internal_buffer_desc test_desc;
8675         u32 sram_dma_descs;
8676         int i, ret;
8677
8678         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8679
8680         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8681         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8682         tw32(RDMAC_STATUS, 0);
8683         tw32(WDMAC_STATUS, 0);
8684
8685         tw32(BUFMGR_MODE, 0);
8686         tw32(FTQ_RESET, 0);
8687
8688         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8689         test_desc.addr_lo = buf_dma & 0xffffffff;
8690         test_desc.nic_mbuf = 0x00002100;
8691         test_desc.len = size;
8692
8693         /*
8694          * HP ZX1 was seeing test failures for 5701 cards running at 33 MHz
8695          * the *second* time the tg3 driver was getting loaded after an
8696          * initial scan.
8697          *
8698          * Broadcom tells me:
8699          *   ...the DMA engine is connected to the GRC block and a DMA
8700          *   reset may affect the GRC block in some unpredictable way...
8701          *   The behavior of resets to individual blocks has not been tested.
8702          *
8703          * Broadcom noted the GRC reset will also reset all sub-components.
8704          */
8705         if (to_device) {
8706                 test_desc.cqid_sqid = (13 << 8) | 2;
8707
8708                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8709                 udelay(40);
8710         } else {
8711                 test_desc.cqid_sqid = (16 << 8) | 7;
8712
8713                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8714                 udelay(40);
8715         }
8716         test_desc.flags = 0x00000005;
8717
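             /* Copy the test descriptor into NIC SRAM one 32-bit word at a
              * time through the config-space memory window registers
              * (TG3PCI_MEM_WIN_BASE_ADDR / TG3PCI_MEM_WIN_DATA).
              */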
8718         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8719                 u32 val;
8720
8721                 val = *(((u32 *)&test_desc) + i);
8722                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8723                                        sram_dma_descs + (i * sizeof(u32)));
8724                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8725         }
8726         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8727
8728         if (to_device) {
8729                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8730         } else {
8731                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8732         }
8733
8734         ret = -ENODEV;
8735         for (i = 0; i < 40; i++) {
8736                 u32 val;
8737
8738                 if (to_device)
8739                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8740                 else
8741                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8742                 if ((val & 0xffff) == sram_dma_descs) {
8743                         ret = 0;
8744                         break;
8745                 }
8746
8747                 udelay(100);
8748         }
8749
8750         return ret;
8751 }
8752
8753 #define TEST_BUFFER_SIZE        0x400
8754
8755 static int __devinit tg3_test_dma(struct tg3 *tp)
8756 {
8757         dma_addr_t buf_dma;
8758         u32 *buf;
8759         int ret;
8760
8761         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8762         if (!buf) {
8763                 ret = -ENOMEM;
8764                 goto out_nofree;
8765         }
8766
8767         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8768                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8769
8770 #ifndef CONFIG_X86
8771         {
8772                 u8 byte;
8773                 int cacheline_size;
8774                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8775
8776                 if (byte == 0)
8777                         cacheline_size = 1024;
8778                 else
8779                         cacheline_size = (int) byte * 4;
8780
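                     /* Pick a DMA write boundary that matches the host cache
                      * line size; PCI-X and PCI Express parts use different
                      * boundary encodings.
                      */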
8781                 switch (cacheline_size) {
8782                 case 16:
8783                 case 32:
8784                 case 64:
8785                 case 128:
8786                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8787                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8788                                 tp->dma_rwctrl |=
8789                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8790                                 break;
8791                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8792                                 tp->dma_rwctrl &=
8793                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8794                                 tp->dma_rwctrl |=
8795                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8796                                 break;
8797                         }
8798                         /* fallthrough */
8799                 case 256:
8800                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8801                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8802                                 tp->dma_rwctrl |=
8803                                         DMA_RWCTRL_WRITE_BNDRY_256;
8804                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8805                                 tp->dma_rwctrl |=
8806                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8807                 }
8808         }
8809 #endif
8810
8811         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8812                 /* DMA read watermark not used on PCIE */
8813                 tp->dma_rwctrl |= 0x00180000;
8814         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8815                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8816                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8817                         tp->dma_rwctrl |= 0x003f0000;
8818                 else
8819                         tp->dma_rwctrl |= 0x003f000f;
8820         } else {
8821                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8822                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8823                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8824
8825                         if (ccval == 0x6 || ccval == 0x7)
8826                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8827
8828                         /* Set bit 23 to re-enable the PCI-X hw bug fix */
8829                         tp->dma_rwctrl |= 0x009f0000;
8830                 } else {
8831                         tp->dma_rwctrl |= 0x001b000f;
8832                 }
8833         }
8834
8835         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8836             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8837                 tp->dma_rwctrl &= 0xfffffff0;
8838
8839         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8840             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8841                 /* Remove this if it causes problems for some boards. */
8842                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8843
8844                 /* On 5700/5701 chips, we need to set this bit.
8845                  * Otherwise the chip will issue cacheline transactions
8846                  * to streamable DMA memory without all of the byte
8847                  * enables turned on.  This is an error on several
8848                  * RISC PCI controllers, in particular sparc64.
8849                  *
8850                  * On 5703/5704 chips, this bit has been reassigned
8851                  * a different meaning.  In particular, it is used
8852                  * on those chips to enable a PCI-X workaround.
8853                  */
8854                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8855         }
8856
8857         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8858
8859 #if 0
8860         /* Unneeded, already done by tg3_get_invariants.  */
8861         tg3_switch_clocks(tp);
8862 #endif
8863
8864         ret = 0;
8865         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8866             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8867                 goto out;
8868
8869         while (1) {
8870                 u32 *p = buf, i;
8871
8872                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8873                         p[i] = i;
8874
8875                 /* Send the buffer to the chip. */
8876                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8877                 if (ret) {
8878                         printk(KERN_ERR "tg3_test_dma() write of the buffer failed, err = %d\n", ret);
8879                         break;
8880                 }
8881
8882 #if 0
8883                 /* validate data reached card RAM correctly. */
8884                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8885                         u32 val;
8886                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8887                         if (le32_to_cpu(val) != p[i]) {
8888                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8889                                 /* ret = -ENODEV here? */
8890                         }
8891                         p[i] = 0;
8892                 }
8893 #endif
8894                 /* Now read it back. */
8895                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8896                 if (ret) {
8897                         printk(KERN_ERR "tg3_test_dma() read of the buffer failed, err = %d\n", ret);
8898
8899                         break;
8900                 }
8901
8902                 /* Verify it. */
8903                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8904                         if (p[i] == i)
8905                                 continue;
8906
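                             /* Data mismatch: if write boundaries were disabled,
                              * retry the whole test with a 16-byte DMA write
                              * boundary before giving up.
                              */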
8907                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8908                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8909                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8910                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8911                                 break;
8912                         } else {
8913                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8914                                 ret = -ENODEV;
8915                                 goto out;
8916                         }
8917                 }
8918
8919                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8920                         /* Success. */
8921                         ret = 0;
8922                         break;
8923                 }
8924         }
8925
8926 out:
8927         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8928 out_nofree:
8929         return ret;
8930 }
8931
8932 static void __devinit tg3_init_link_config(struct tg3 *tp)
8933 {
8934         tp->link_config.advertising =
8935                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8936                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8937                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8938                  ADVERTISED_Autoneg | ADVERTISED_MII);
8939         tp->link_config.speed = SPEED_INVALID;
8940         tp->link_config.duplex = DUPLEX_INVALID;
8941         tp->link_config.autoneg = AUTONEG_ENABLE;
8942         netif_carrier_off(tp->dev);
8943         tp->link_config.active_speed = SPEED_INVALID;
8944         tp->link_config.active_duplex = DUPLEX_INVALID;
8945         tp->link_config.phy_is_low_power = 0;
8946         tp->link_config.orig_speed = SPEED_INVALID;
8947         tp->link_config.orig_duplex = DUPLEX_INVALID;
8948         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8949 }
8950
8951 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8952 {
8953         tp->bufmgr_config.mbuf_read_dma_low_water =
8954                 DEFAULT_MB_RDMA_LOW_WATER;
8955         tp->bufmgr_config.mbuf_mac_rx_low_water =
8956                 DEFAULT_MB_MACRX_LOW_WATER;
8957         tp->bufmgr_config.mbuf_high_water =
8958                 DEFAULT_MB_HIGH_WATER;
8959
8960         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8961                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8962         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8963                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8964         tp->bufmgr_config.mbuf_high_water_jumbo =
8965                 DEFAULT_MB_HIGH_WATER_JUMBO;
8966
8967         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8968         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8969 }
8970
8971 static char * __devinit tg3_phy_string(struct tg3 *tp)
8972 {
8973         switch (tp->phy_id & PHY_ID_MASK) {
8974         case PHY_ID_BCM5400:    return "5400";
8975         case PHY_ID_BCM5401:    return "5401";
8976         case PHY_ID_BCM5411:    return "5411";
8977         case PHY_ID_BCM5701:    return "5701";
8978         case PHY_ID_BCM5703:    return "5703";
8979         case PHY_ID_BCM5704:    return "5704";
8980         case PHY_ID_BCM5705:    return "5705";
8981         case PHY_ID_BCM5750:    return "5750";
8982         case PHY_ID_BCM5752:    return "5752";
8983         case PHY_ID_BCM8002:    return "8002/serdes";
8984         case 0:                 return "serdes";
8985         default:                return "unknown";
8986         }
8987 }
8988
8989 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8990 {
8991         struct pci_dev *peer;
8992         unsigned int func, devnr = tp->pdev->devfn & ~7;
8993
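             /* devfn & ~7 selects function 0 of this device; walk the other
              * functions in the same slot to find the peer port of the
              * dual-port 5704.
              */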
8994         for (func = 0; func < 8; func++) {
8995                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8996                 if (peer && peer != tp->pdev)
8997                         break;
8998                 pci_dev_put(peer);
8999         }
9000         if (!peer || peer == tp->pdev)
9001                 BUG();
9002
9003         /*
9004          * We don't need to keep the refcount elevated; there's no way
9005          * to remove one half of this device without removing the other
9006          */
9007         pci_dev_put(peer);
9008
9009         return peer;
9010 }
9011
9012 static int __devinit tg3_init_one(struct pci_dev *pdev,
9013                                   const struct pci_device_id *ent)
9014 {
9015         static int tg3_version_printed = 0;
9016         unsigned long tg3reg_base, tg3reg_len;
9017         struct net_device *dev;
9018         struct tg3 *tp;
9019         int i, err, pci_using_dac, pm_cap;
9020
9021         if (tg3_version_printed++ == 0)
9022                 printk(KERN_INFO "%s", version);
9023
9024         err = pci_enable_device(pdev);
9025         if (err) {
9026                 printk(KERN_ERR PFX "Cannot enable PCI device, "
9027                        "aborting.\n");
9028                 return err;
9029         }
9030
9031         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9032                 printk(KERN_ERR PFX "Cannot find proper PCI device "
9033                        "base address, aborting.\n");
9034                 err = -ENODEV;
9035                 goto err_out_disable_pdev;
9036         }
9037
9038         err = pci_request_regions(pdev, DRV_MODULE_NAME);
9039         if (err) {
9040                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
9041                        "aborting.\n");
9042                 goto err_out_disable_pdev;
9043         }
9044
9045         pci_set_master(pdev);
9046
9047         /* Find power-management capability. */
9048         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9049         if (pm_cap == 0) {
9050                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
9051                        "aborting.\n");
9052                 err = -EIO;
9053                 goto err_out_free_res;
9054         }
9055
9056         /* Configure DMA attributes. */
9057         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
9058         if (!err) {
9059                 pci_using_dac = 1;
9060                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
9061                 if (err < 0) {
9062                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
9063                                "for consistent allocations\n");
9064                         goto err_out_free_res;
9065                 }
9066         } else {
9067                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
9068                 if (err) {
9069                         printk(KERN_ERR PFX "No usable DMA configuration, "
9070                                "aborting.\n");
9071                         goto err_out_free_res;
9072                 }
9073                 pci_using_dac = 0;
9074         }
9075
9076         tg3reg_base = pci_resource_start(pdev, 0);
9077         tg3reg_len = pci_resource_len(pdev, 0);
9078
9079         dev = alloc_etherdev(sizeof(*tp));
9080         if (!dev) {
9081                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
9082                 err = -ENOMEM;
9083                 goto err_out_free_res;
9084         }
9085
9086         SET_MODULE_OWNER(dev);
9087         SET_NETDEV_DEV(dev, &pdev->dev);
9088
9089         if (pci_using_dac)
9090                 dev->features |= NETIF_F_HIGHDMA;
9091         dev->features |= NETIF_F_LLTX;
9092 #if TG3_VLAN_TAG_USED
9093         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9094         dev->vlan_rx_register = tg3_vlan_rx_register;
9095         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
9096 #endif
9097
9098         tp = netdev_priv(dev);
9099         tp->pdev = pdev;
9100         tp->dev = dev;
9101         tp->pm_cap = pm_cap;
9102         tp->mac_mode = TG3_DEF_MAC_MODE;
9103         tp->rx_mode = TG3_DEF_RX_MODE;
9104         tp->tx_mode = TG3_DEF_TX_MODE;
9105         tp->mi_mode = MAC_MI_MODE_BASE;
9106         if (tg3_debug > 0)
9107                 tp->msg_enable = tg3_debug;
9108         else
9109                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
9110
9111         /* The word/byte swap controls here govern register access byte
9112          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
9113          * setting below.
9114          */
9115         tp->misc_host_ctrl =
9116                 MISC_HOST_CTRL_MASK_PCI_INT |
9117                 MISC_HOST_CTRL_WORD_SWAP |
9118                 MISC_HOST_CTRL_INDIR_ACCESS |
9119                 MISC_HOST_CTRL_PCISTATE_RW;
9120
        /* The NONFRM (non-frame) byte/word swap controls take effect on
         * descriptor entries, i.e. anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->tx_lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

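        /* Map the register BAR recorded above; tr32()/tw32() register
         * accesses go through this mapping.
         */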
        tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
        if (!tp->regs) {
                printk(KERN_ERR PFX "Cannot map device registers, "
                       "aborting.\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        tg3_init_link_config(tp);

        tg3_init_bufmgr_config(tp);

        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
        tp->tx_pending = TG3_DEF_TX_RING_PENDING;

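        /* Wire up the net_device entry points and the NAPI poll handler
         * (dev->poll / dev->weight).
         */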
        dev->open = tg3_open;
        dev->stop = tg3_close;
        dev->get_stats = tg3_get_stats;
        dev->set_multicast_list = tg3_set_rx_mode;
        dev->set_mac_address = tg3_set_mac_addr;
        dev->do_ioctl = tg3_ioctl;
        dev->tx_timeout = tg3_tx_timeout;
        dev->poll = tg3_poll;
        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->weight = 64;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->change_mtu = tg3_change_mtu;
        dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = tg3_poll_controller;
#endif

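        /* tg3_get_invariants() reads the chip revision and feature flags
         * from PCI config space and chip registers; the per-chip special
         * cases below depend on what it finds.
         */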
        err = tg3_get_invariants(tp);
        if (err) {
                printk(KERN_ERR PFX "Problem fetching invariants of chip, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

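        /* 5705-class and newer chips (TG3_FLG2_5705_PLUS) use the reduced
         * buffer manager watermarks defined for them.
         */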
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
                        DEFAULT_MB_MACRX_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_high_water =
                        DEFAULT_MB_HIGH_WATER_5705;
        }

#if TG3_TSO_SUPPORT != 0
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
                   (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
        } else {
                tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
        }

        /* TSO is off by default, user can enable using ethtool.  */
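        /* (e.g. "ethtool -K ethX tso on" at runtime; "ethX" here is just a
         *  placeholder for the interface name.)
         */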
#if 0
        if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
                dev->features |= NETIF_F_TSO;
#endif

#endif

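        /* 5705_A1 without TSO on a slow (non-high-speed) PCI bus is limited
         * to 64 pending RX descriptors (TG3_FLG2_MAX_RXPEND_64).
         */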
        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
            !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
                tp->rx_pending = 63;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->pdev_peer = tg3_find_5704_peer(tp);

        err = tg3_get_device_address(tp);
        if (err) {
                printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        /*
         * Reset the chip in case a UNDI or EFI driver did not shut it
         * down cleanly.  Otherwise the DMA self test below will enable
         * the WDMAC and we'll see (spurious) pending DMA on the PCI bus
         * at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                pci_save_state(tp->pdev);
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp);
        }

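        /* tg3_test_dma() exercises the DMA engines and tunes the DMA
         * read/write control register for this host; probing is aborted
         * if it fails.
         */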
        err = tg3_test_dma(tp);
        if (err) {
                printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
                goto err_out_iounmap;
        }

        /* Tigon3 can only offload IPv4 checksums... and some chips have
         * buggy checksumming.
         */
        if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
                dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
                tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
        } else
                tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

        if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
                dev->features &= ~NETIF_F_HIGHDMA;

        /* flow control autonegotiation is default behavior */
        tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

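        /* Chip setup is complete; register_netdev() makes the interface
         * visible to the stack and userspace.
         */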
        err = register_netdev(dev);
        if (err) {
                printk(KERN_ERR PFX "Cannot register net device, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        pci_set_drvdata(pdev, dev);

        /* Now that we have fully set up the chip, save away a snapshot
         * of the PCI config space.  We need to restore this after
         * GRC_MISC_CFG core clock resets and some resume events.
         */
        pci_save_state(tp->pdev);

        printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
               dev->name,
               tp->board_part_number,
               tp->pci_chip_rev_id,
               tg3_phy_string(tp),
               ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
               ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
               ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
               (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i],
                       i == 5 ? '\n' : ':');

        printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
               "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
               "TSOcap[%d]\n",
               dev->name,
               (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
               (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
               (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
               (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
               (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);

        return 0;

err_out_iounmap:
        iounmap(tp->regs);

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                unregister_netdev(dev);
                iounmap(tp->regs);
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

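/* Power management hooks: tg3_suspend() quiesces and halts the chip,
 * tg3_resume() restores PCI config space and re-initializes the hardware
 * from scratch.
 */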
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        spin_lock_irq(&tp->lock);
        spin_lock(&tp->tx_lock);
        tg3_disable_ints(tp);
        spin_unlock(&tp->tx_lock);
        spin_unlock_irq(&tp->lock);

        netif_device_detach(dev);

        spin_lock_irq(&tp->lock);
        spin_lock(&tp->tx_lock);
        tg3_halt(tp);
        spin_unlock(&tp->tx_lock);
        spin_unlock_irq(&tp->lock);

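        /* If the power-state change fails, bring the device all the way
         * back up so the interface remains usable.
         */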
        err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
        if (err) {
                spin_lock_irq(&tp->lock);
                spin_lock(&tp->tx_lock);

                tg3_init_hw(tp);

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

                spin_unlock(&tp->tx_lock);
                spin_unlock_irq(&tp->lock);
        }

        return err;
}

static int tg3_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        pci_restore_state(tp->pdev);

        err = tg3_set_power_state(tp, 0);
        if (err)
                return err;

        netif_device_attach(dev);

        spin_lock_irq(&tp->lock);
        spin_lock(&tp->tx_lock);

        tg3_init_hw(tp);

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_enable_ints(tp);

        tg3_netif_start(tp);

        spin_unlock(&tp->tx_lock);
        spin_unlock_irq(&tp->lock);

        return 0;
}

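/* PCI glue: tg3_init_one() runs once per device matched by tg3_pci_tbl. */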
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .suspend        = tg3_suspend,
        .resume         = tg3_resume
};

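/* pci_module_init() is the registration helper of this kernel generation;
 * later kernels call pci_register_driver() directly.
 */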
static int __init tg3_init(void)
{
        return pci_module_init(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);