1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.25"
65 #define DRV_MODULE_RELDATE      "March 24, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
89           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && \
90           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) ? 9000 : 1500)
91
92 /* These numbers seem to be hard coded in the NIC firmware somehow.
93  * You can't change the ring sizes, but you can change where you place
94  * them in the NIC onboard memory.
95  */
96 #define TG3_RX_RING_SIZE                512
97 #define TG3_DEF_RX_RING_PENDING         200
98 #define TG3_RX_JUMBO_RING_SIZE          256
99 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
100
101 /* Do not place this n-ring entries value into the tp struct itself,
102  * we really want to expose these constants to GCC so that modulo et
103  * al.  operations are done with shifts and masks instead of with
104  * hw multiply/modulo instructions.  Another solution would be to
105  * replace things like '% foo' with '& (foo - 1)'.
106  */
107 #define TG3_RX_RCB_RING_SIZE(tp)        \
108         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
109
110 #define TG3_TX_RING_SIZE                512
111 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
112
113 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_RING_SIZE)
115 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
116                                  TG3_RX_JUMBO_RING_SIZE)
117 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
118                                    TG3_RX_RCB_RING_SIZE(tp))
119 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
120                                  TG3_TX_RING_SIZE)
121 #define TX_RING_GAP(TP) \
122         (TG3_TX_RING_SIZE - (TP)->tx_pending)
123 #define TX_BUFFS_AVAIL(TP)                                              \
124         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
125           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
126           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
127 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128
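/*
 * Illustrative sketch, not referenced by the driver: because TG3_TX_RING_SIZE
 * is a power-of-two compile-time constant, NEXT_TX() wraps the index with a
 * mask, which is exactly the "% foo" -> "& (foo - 1)" rewrite the ring-size
 * comment above describes.  For any power-of-two SIZE,
 * (n % SIZE) == (n & (SIZE - 1)); the unused helper below just checks that
 * identity against NEXT_TX().
 */
static inline int tg3_next_tx_mask_demo(void)
{
        unsigned int n;

        for (n = 0; n < 2 * TG3_TX_RING_SIZE; n++)
                if (((n + 1) % TG3_TX_RING_SIZE) != NEXT_TX(n))
                        return 1;       /* unreachable: mask equals modulo */
        return 0;
}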
129 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
131
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
134
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
138 static char version[] __devinitdata =
139         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
140
141 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
142 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
143 MODULE_LICENSE("GPL");
144 MODULE_VERSION(DRV_MODULE_VERSION);
145
146 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
147 module_param(tg3_debug, int, 0);
148 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
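/* Worked example (assuming the standard NETIF_MSG_* bit values): the default
 * mask TG3_DEF_MSG_ENABLE above is NETIF_MSG_DRV | NETIF_MSG_PROBE | ... |
 * NETIF_MSG_TX_ERR == 0x00ff, so loading the module with tg3_debug=0xff
 * simply makes the default verbosity explicit.
 */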
149
150 static struct pci_device_id tg3_pci_tbl[] = {
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, 0x1600, /* TIGON3_5752 */
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
232           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233         { 0, }
234 };
235
236 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
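/* MODULE_DEVICE_TABLE() exports the ID list above so hotplug/module tools can
 * autoload tg3 for matching hardware; each entry matches on PCI vendor/device
 * only, with PCI_ANY_ID wildcards for the subsystem IDs and no class match or
 * driver_data.
 */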
237
238 static struct {
239         const char string[ETH_GSTRING_LEN];
240 } ethtool_stats_keys[TG3_NUM_STATS] = {
241         { "rx_octets" },
242         { "rx_fragments" },
243         { "rx_ucast_packets" },
244         { "rx_mcast_packets" },
245         { "rx_bcast_packets" },
246         { "rx_fcs_errors" },
247         { "rx_align_errors" },
248         { "rx_xon_pause_rcvd" },
249         { "rx_xoff_pause_rcvd" },
250         { "rx_mac_ctrl_rcvd" },
251         { "rx_xoff_entered" },
252         { "rx_frame_too_long_errors" },
253         { "rx_jabbers" },
254         { "rx_undersize_packets" },
255         { "rx_in_length_errors" },
256         { "rx_out_length_errors" },
257         { "rx_64_or_less_octet_packets" },
258         { "rx_65_to_127_octet_packets" },
259         { "rx_128_to_255_octet_packets" },
260         { "rx_256_to_511_octet_packets" },
261         { "rx_512_to_1023_octet_packets" },
262         { "rx_1024_to_1522_octet_packets" },
263         { "rx_1523_to_2047_octet_packets" },
264         { "rx_2048_to_4095_octet_packets" },
265         { "rx_4096_to_8191_octet_packets" },
266         { "rx_8192_to_9022_octet_packets" },
267
268         { "tx_octets" },
269         { "tx_collisions" },
270
271         { "tx_xon_sent" },
272         { "tx_xoff_sent" },
273         { "tx_flow_control" },
274         { "tx_mac_errors" },
275         { "tx_single_collisions" },
276         { "tx_mult_collisions" },
277         { "tx_deferred" },
278         { "tx_excessive_collisions" },
279         { "tx_late_collisions" },
280         { "tx_collide_2times" },
281         { "tx_collide_3times" },
282         { "tx_collide_4times" },
283         { "tx_collide_5times" },
284         { "tx_collide_6times" },
285         { "tx_collide_7times" },
286         { "tx_collide_8times" },
287         { "tx_collide_9times" },
288         { "tx_collide_10times" },
289         { "tx_collide_11times" },
290         { "tx_collide_12times" },
291         { "tx_collide_13times" },
292         { "tx_collide_14times" },
293         { "tx_collide_15times" },
294         { "tx_ucast_packets" },
295         { "tx_mcast_packets" },
296         { "tx_bcast_packets" },
297         { "tx_carrier_sense_errors" },
298         { "tx_discards" },
299         { "tx_errors" },
300
301         { "dma_writeq_full" },
302         { "dma_write_prioq_full" },
303         { "rxbds_empty" },
304         { "rx_discards" },
305         { "rx_errors" },
306         { "rx_threshold_hit" },
307
308         { "dma_readq_full" },
309         { "dma_read_prioq_full" },
310         { "tx_comp_queue_full" },
311
312         { "ring_set_send_prod_index" },
313         { "ring_status_update" },
314         { "nic_irqs" },
315         { "nic_avoided_irqs" },
316         { "nic_tx_threshold_hit" }
317 };
318
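/* Register write path: when TG3_FLAG_PCIX_TARGET_HWBUG is set the register is
 * not touched through MMIO at all; the offset goes into the
 * TG3PCI_REG_BASE_ADDR window and the value into TG3PCI_REG_DATA as PCI
 * config cycles, serialized by tp->indirect_lock.  Otherwise a plain writel()
 * is used, with a readl() read-back on chips flagged with the 5701 register
 * write bug.
 */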
319 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
320 {
321         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
322                 unsigned long flags;
323
324                 spin_lock_irqsave(&tp->indirect_lock, flags);
325                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
326                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
327                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
328         } else {
329                 writel(val, tp->regs + off);
330                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
331                         readl(tp->regs + off);
332         }
333 }
334
335 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
336 {
337         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
338                 unsigned long flags;
339
340                 spin_lock_irqsave(&tp->indirect_lock, flags);
341                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
342                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
343                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
344         } else {
345                 void __iomem *dest = tp->regs + off;
346                 writel(val, dest);
347                 readl(dest);    /* always flush PCI write */
348         }
349 }
350
351 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
352 {
353         void __iomem *mbox = tp->regs + off;
354         writel(val, mbox);
355         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
356                 readl(mbox);
357 }
358
359 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
360 {
361         void __iomem *mbox = tp->regs + off;
362         writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
364                 writel(val, mbox);
365         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
366                 readl(mbox);
367 }
368
369 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
370 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
371 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
372
373 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
374 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
375 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
376 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
377 #define tr32(reg)               readl(tp->regs + (reg))
378 #define tr16(reg)               readw(tp->regs + (reg))
379 #define tr8(reg)                readb(tp->regs + (reg))
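/* Accessor summary: tw32() goes through the indirect-access workaround above,
 * tw32_f() takes the same path but always reads back to flush posted MMIO
 * writes, tw32_mailbox() is a plain writel(), and the rx/tx mailbox variants
 * apply the read-back or double-write quirks their respective flags call for.
 */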
380
381 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
382 {
383         unsigned long flags;
384
385         spin_lock_irqsave(&tp->indirect_lock, flags);
386         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
387         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
388
389         /* Always leave this as zero. */
390         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
391         spin_unlock_irqrestore(&tp->indirect_lock, flags);
392 }
393
394 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
395 {
396         unsigned long flags;
397
398         spin_lock_irqsave(&tp->indirect_lock, flags);
399         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
400         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
401
402         /* Always leave this as zero. */
403         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
404         spin_unlock_irqrestore(&tp->indirect_lock, flags);
405 }
406
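/* Interrupt masking below works at two levels: tg3_disable_ints() sets
 * MISC_HOST_CTRL_MASK_PCI_INT and writes 1 to the interrupt mailbox, while
 * tg3_enable_ints() clears the mask and writes 0; the trailing tr32() of the
 * mailbox flushes the posted writes so the change takes effect immediately.
 */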
407 static void tg3_disable_ints(struct tg3 *tp)
408 {
409         tw32(TG3PCI_MISC_HOST_CTRL,
410              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
411         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
412         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
413 }
414
415 static inline void tg3_cond_int(struct tg3 *tp)
416 {
417         if (tp->hw_status->status & SD_STATUS_UPDATED)
418                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
419 }
420
421 static void tg3_enable_ints(struct tg3 *tp)
422 {
423         tw32(TG3PCI_MISC_HOST_CTRL,
424              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
425         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
426         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
427
428         tg3_cond_int(tp);
429 }
430
431 /* tg3_restart_ints
432  *  similar to tg3_enable_ints, but it can return without flushing the
433  *  PIO write which reenables interrupts
434  */
435 static void tg3_restart_ints(struct tg3 *tp)
436 {
437         tw32(TG3PCI_MISC_HOST_CTRL,
438                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
439         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
440         mmiowb();
441
442         tg3_cond_int(tp);
443 }
444
445 static inline void tg3_netif_stop(struct tg3 *tp)
446 {
447         netif_poll_disable(tp->dev);
448         netif_tx_disable(tp->dev);
449 }
450
451 static inline void tg3_netif_start(struct tg3 *tp)
452 {
453         netif_wake_queue(tp->dev);
454         /* NOTE: unconditional netif_wake_queue is only appropriate
455          * so long as all callers are assured to have free tx slots
456          * (such as after tg3_init_hw)
457          */
458         netif_poll_enable(tp->dev);
459         tg3_cond_int(tp);
460 }
461
462 static void tg3_switch_clocks(struct tg3 *tp)
463 {
464         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
465         u32 orig_clock_ctrl;
466
467         orig_clock_ctrl = clock_ctrl;
468         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
469                        CLOCK_CTRL_CLKRUN_OENABLE |
470                        0x1f);
471         tp->pci_clock_ctrl = clock_ctrl;
472
473         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
474                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
475                         tw32_f(TG3PCI_CLOCK_CTRL,
476                                clock_ctrl | CLOCK_CTRL_625_CORE);
477                         udelay(40);
478                 }
479         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
480                 tw32_f(TG3PCI_CLOCK_CTRL,
481                      clock_ctrl |
482                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
483                 udelay(40);
484                 tw32_f(TG3PCI_CLOCK_CTRL,
485                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
486                 udelay(40);
487         }
488         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
489         udelay(40);
490 }
491
492 #define PHY_BUSY_LOOPS  5000
493
494 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
495 {
496         u32 frame_val;
497         unsigned int loops;
498         int ret;
499
500         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
501                 tw32_f(MAC_MI_MODE,
502                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
503                 udelay(80);
504         }
505
506         *val = 0x0;
507
508         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
509                       MI_COM_PHY_ADDR_MASK);
510         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
511                       MI_COM_REG_ADDR_MASK);
512         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
513         
514         tw32_f(MAC_MI_COM, frame_val);
515
516         loops = PHY_BUSY_LOOPS;
517         while (loops != 0) {
518                 udelay(10);
519                 frame_val = tr32(MAC_MI_COM);
520
521                 if ((frame_val & MI_COM_BUSY) == 0) {
522                         udelay(5);
523                         frame_val = tr32(MAC_MI_COM);
524                         break;
525                 }
526                 loops -= 1;
527         }
528
529         ret = -EBUSY;
530         if (loops != 0) {
531                 *val = frame_val & MI_COM_DATA_MASK;
532                 ret = 0;
533         }
534
535         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
536                 tw32_f(MAC_MI_MODE, tp->mi_mode);
537                 udelay(80);
538         }
539
540         return ret;
541 }
542
543 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
544 {
545         u32 frame_val;
546         unsigned int loops;
547         int ret;
548
549         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
550                 tw32_f(MAC_MI_MODE,
551                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
552                 udelay(80);
553         }
554
555         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
556                       MI_COM_PHY_ADDR_MASK);
557         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
558                       MI_COM_REG_ADDR_MASK);
559         frame_val |= (val & MI_COM_DATA_MASK);
560         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
561         
562         tw32_f(MAC_MI_COM, frame_val);
563
564         loops = PHY_BUSY_LOOPS;
565         while (loops != 0) {
566                 udelay(10);
567                 frame_val = tr32(MAC_MI_COM);
568                 if ((frame_val & MI_COM_BUSY) == 0) {
569                         udelay(5);
570                         frame_val = tr32(MAC_MI_COM);
571                         break;
572                 }
573                 loops -= 1;
574         }
575
576         ret = -EBUSY;
577         if (loops != 0)
578                 ret = 0;
579
580         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
581                 tw32_f(MAC_MI_MODE, tp->mi_mode);
582                 udelay(80);
583         }
584
585         return ret;
586 }
587
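/*
 * Illustrative sketch, not used by the driver: tg3_readphy()/tg3_writephy()
 * are typically combined into read-modify-write sequences like the one in
 * tg3_phy_set_wirespeed() below.  A hypothetical helper doing that in one
 * place (tg3_phy_set_bits_sketch() is not a real tg3 function) could look
 * like this:
 */
static inline int tg3_phy_set_bits_sketch(struct tg3 *tp, int reg, u32 bits)
{
        u32 val;

        if (tg3_readphy(tp, reg, &val))
                return -EBUSY;

        return tg3_writephy(tp, reg, val | bits);
}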
588 static void tg3_phy_set_wirespeed(struct tg3 *tp)
589 {
590         u32 val;
591
592         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
593                 return;
594
595         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
596             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
597                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
598                              (val | (1 << 15) | (1 << 4)));
599 }
600
601 static int tg3_bmcr_reset(struct tg3 *tp)
602 {
603         u32 phy_control;
604         int limit, err;
605
606         /* OK, reset it, and poll the BMCR_RESET bit until it
607          * clears or we time out.
608          */
609         phy_control = BMCR_RESET;
610         err = tg3_writephy(tp, MII_BMCR, phy_control);
611         if (err != 0)
612                 return -EBUSY;
613
614         limit = 5000;
615         while (limit--) {
616                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
617                 if (err != 0)
618                         return -EBUSY;
619
620                 if ((phy_control & BMCR_RESET) == 0) {
621                         udelay(40);
622                         break;
623                 }
624                 udelay(10);
625         }
626         if (limit <= 0)
627                 return -EBUSY;
628
629         return 0;
630 }
631
632 static int tg3_wait_macro_done(struct tg3 *tp)
633 {
634         int limit = 100;
635
636         while (limit--) {
637                 u32 tmp32;
638
639                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
640                         if ((tmp32 & 0x1000) == 0)
641                                 break;
642                 }
643         }
644         if (limit <= 0)
645                 return -EBUSY;
646
647         return 0;
648 }
649
650 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
651 {
652         static const u32 test_pat[4][6] = {
653         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
654         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
655         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
656         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
657         };
658         int chan;
659
660         for (chan = 0; chan < 4; chan++) {
661                 int i;
662
663                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
664                              (chan * 0x2000) | 0x0200);
665                 tg3_writephy(tp, 0x16, 0x0002);
666
667                 for (i = 0; i < 6; i++)
668                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
669                                      test_pat[chan][i]);
670
671                 tg3_writephy(tp, 0x16, 0x0202);
672                 if (tg3_wait_macro_done(tp)) {
673                         *resetp = 1;
674                         return -EBUSY;
675                 }
676
677                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
678                              (chan * 0x2000) | 0x0200);
679                 tg3_writephy(tp, 0x16, 0x0082);
680                 if (tg3_wait_macro_done(tp)) {
681                         *resetp = 1;
682                         return -EBUSY;
683                 }
684
685                 tg3_writephy(tp, 0x16, 0x0802);
686                 if (tg3_wait_macro_done(tp)) {
687                         *resetp = 1;
688                         return -EBUSY;
689                 }
690
691                 for (i = 0; i < 6; i += 2) {
692                         u32 low, high;
693
694                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
695                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
696                             tg3_wait_macro_done(tp)) {
697                                 *resetp = 1;
698                                 return -EBUSY;
699                         }
700                         low &= 0x7fff;
701                         high &= 0x000f;
702                         if (low != test_pat[chan][i] ||
703                             high != test_pat[chan][i+1]) {
704                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
705                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
706                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
707
708                                 return -EBUSY;
709                         }
710                 }
711         }
712
713         return 0;
714 }
715
716 static int tg3_phy_reset_chanpat(struct tg3 *tp)
717 {
718         int chan;
719
720         for (chan = 0; chan < 4; chan++) {
721                 int i;
722
723                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
724                              (chan * 0x2000) | 0x0200);
725                 tg3_writephy(tp, 0x16, 0x0002);
726                 for (i = 0; i < 6; i++)
727                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
728                 tg3_writephy(tp, 0x16, 0x0202);
729                 if (tg3_wait_macro_done(tp))
730                         return -EBUSY;
731         }
732
733         return 0;
734 }
735
736 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
737 {
738         u32 reg32, phy9_orig;
739         int retries, do_phy_reset, err;
740
741         retries = 10;
742         do_phy_reset = 1;
743         do {
744                 if (do_phy_reset) {
745                         err = tg3_bmcr_reset(tp);
746                         if (err)
747                                 return err;
748                         do_phy_reset = 0;
749                 }
750
751                 /* Disable transmitter and interrupt.  */
752                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
753                         continue;
754
755                 reg32 |= 0x3000;
756                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
757
758                 /* Set full-duplex, 1000 mbps.  */
759                 tg3_writephy(tp, MII_BMCR,
760                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
761
762                 /* Set to master mode.  */
763                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
764                         continue;
765
766                 tg3_writephy(tp, MII_TG3_CTRL,
767                              (MII_TG3_CTRL_AS_MASTER |
768                               MII_TG3_CTRL_ENABLE_AS_MASTER));
769
770                 /* Enable SM_DSP_CLOCK and 6dB.  */
771                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
772
773                 /* Block the PHY control access.  */
774                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
775                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
776
777                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
778                 if (!err)
779                         break;
780         } while (--retries);
781
782         err = tg3_phy_reset_chanpat(tp);
783         if (err)
784                 return err;
785
786         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
787         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
788
789         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
790         tg3_writephy(tp, 0x16, 0x0000);
791
792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
793             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
794                 /* Set Extended packet length bit for jumbo frames */
795                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
796         }
797         else {
798                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
799         }
800
801         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
802
803         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
804                 reg32 &= ~0x3000;
805                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
806         } else if (!err)
807                 err = -EBUSY;
808
809         return err;
810 }
811
812 /* Reset the Tigon3 PHY unconditionally and re-apply the chip-specific
813  * PHY workarounds afterwards.
814  */
815 static int tg3_phy_reset(struct tg3 *tp)
816 {
817         u32 phy_status;
818         int err;
819
820         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
821         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
822         if (err != 0)
823                 return -EBUSY;
824
825         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
826             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
827             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
828                 err = tg3_phy_reset_5703_4_5(tp);
829                 if (err)
830                         return err;
831                 goto out;
832         }
833
834         err = tg3_bmcr_reset(tp);
835         if (err)
836                 return err;
837
838 out:
839         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
840                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
841                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
842                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
843                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
844                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
845                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
846         }
847         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
848                 tg3_writephy(tp, 0x1c, 0x8d68);
849                 tg3_writephy(tp, 0x1c, 0x8d68);
850         }
851         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
852                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
853                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
854                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
855                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
856                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
857                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
858                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
859                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
860         }
861         /* Set Extended packet length bit (bit 14) on all chips that
862          * support jumbo frames. */
863         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
864                 /* Cannot do read-modify-write on 5401 */
865                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
866         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
867                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
868                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) {
869                 u32 phy_reg;
870
871                 /* Set bit 14 with read-modify-write to preserve other bits */
872                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
873                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
874                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
875         }
876
877         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
878          * jumbo frames transmission.
879          */
880         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
881             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
882             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) {
883                 u32 phy_reg;
884
885                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
886                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
887                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
888         }
889
890         tg3_phy_set_wirespeed(tp);
891         return 0;
892 }
893
894 static void tg3_frob_aux_power(struct tg3 *tp)
895 {
896         struct tg3 *tp_peer = tp;
897
898         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
899                 return;
900
901         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
902                 tp_peer = pci_get_drvdata(tp->pdev_peer);
903                 if (!tp_peer)
904                         BUG();
905         }
906
907
908         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
909             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
910                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
911                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
912                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
913                              (GRC_LCLCTRL_GPIO_OE0 |
914                               GRC_LCLCTRL_GPIO_OE1 |
915                               GRC_LCLCTRL_GPIO_OE2 |
916                               GRC_LCLCTRL_GPIO_OUTPUT0 |
917                               GRC_LCLCTRL_GPIO_OUTPUT1));
918                         udelay(100);
919                 } else {
920                         u32 no_gpio2;
921                         u32 grc_local_ctrl;
922
923                         if (tp_peer != tp &&
924                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
925                                 return;
926
927                         /* On 5753 and variants, GPIO2 cannot be used. */
928                         no_gpio2 = tp->nic_sram_data_cfg &
929                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
930
931                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
932                                          GRC_LCLCTRL_GPIO_OE1 |
933                                          GRC_LCLCTRL_GPIO_OE2 |
934                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
935                                          GRC_LCLCTRL_GPIO_OUTPUT2;
936                         if (no_gpio2) {
937                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
938                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
939                         }
940                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
941                                                 grc_local_ctrl);
942                         udelay(100);
943
944                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
945
946                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
947                                                 grc_local_ctrl);
948                         udelay(100);
949
950                         if (!no_gpio2) {
951                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
952                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
953                                        grc_local_ctrl);
954                                 udelay(100);
955                         }
956                 }
957         } else {
958                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
959                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
960                         if (tp_peer != tp &&
961                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
962                                 return;
963
964                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
965                              (GRC_LCLCTRL_GPIO_OE1 |
966                               GRC_LCLCTRL_GPIO_OUTPUT1));
967                         udelay(100);
968
969                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
970                              (GRC_LCLCTRL_GPIO_OE1));
971                         udelay(100);
972
973                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
974                              (GRC_LCLCTRL_GPIO_OE1 |
975                               GRC_LCLCTRL_GPIO_OUTPUT1));
976                         udelay(100);
977                 }
978         }
979 }
980
981 static int tg3_setup_phy(struct tg3 *, int);
982
983 #define RESET_KIND_SHUTDOWN     0
984 #define RESET_KIND_INIT         1
985 #define RESET_KIND_SUSPEND      2
986
987 static void tg3_write_sig_post_reset(struct tg3 *, int);
988 static int tg3_halt_cpu(struct tg3 *, u32);
989
990 static int tg3_set_power_state(struct tg3 *tp, int state)
991 {
992         u32 misc_host_ctrl;
993         u16 power_control, power_caps;
994         int pm = tp->pm_cap;
995
996         /* Make sure register accesses (indirect or otherwise)
997          * will function correctly.
998          */
999         pci_write_config_dword(tp->pdev,
1000                                TG3PCI_MISC_HOST_CTRL,
1001                                tp->misc_host_ctrl);
1002
1003         pci_read_config_word(tp->pdev,
1004                              pm + PCI_PM_CTRL,
1005                              &power_control);
1006         power_control |= PCI_PM_CTRL_PME_STATUS;
1007         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
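        /* The requested state is a raw PCI power state (0..3); the low two
         * bits of PCI_PM_CTRL select D0..D3hot, which is what the switch
         * below writes back.
         */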
1008         switch (state) {
1009         case 0:
1010                 power_control |= 0;
1011                 pci_write_config_word(tp->pdev,
1012                                       pm + PCI_PM_CTRL,
1013                                       power_control);
1014                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1015                 udelay(100);
1016
1017                 return 0;
1018
1019         case 1:
1020                 power_control |= 1;
1021                 break;
1022
1023         case 2:
1024                 power_control |= 2;
1025                 break;
1026
1027         case 3:
1028                 power_control |= 3;
1029                 break;
1030
1031         default:
1032                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1033                        "requested.\n",
1034                        tp->dev->name, state);
1035                 return -EINVAL;
1036         };
1037
1038         power_control |= PCI_PM_CTRL_PME_ENABLE;
1039
1040         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1041         tw32(TG3PCI_MISC_HOST_CTRL,
1042              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1043
1044         if (tp->link_config.phy_is_low_power == 0) {
1045                 tp->link_config.phy_is_low_power = 1;
1046                 tp->link_config.orig_speed = tp->link_config.speed;
1047                 tp->link_config.orig_duplex = tp->link_config.duplex;
1048                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1049         }
1050
1051         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1052                 tp->link_config.speed = SPEED_10;
1053                 tp->link_config.duplex = DUPLEX_HALF;
1054                 tp->link_config.autoneg = AUTONEG_ENABLE;
1055                 tg3_setup_phy(tp, 0);
1056         }
1057
1058         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1059
1060         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1061                 u32 mac_mode;
1062
1063                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1064                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1065                         udelay(40);
1066
1067                         mac_mode = MAC_MODE_PORT_MODE_MII;
1068
1069                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1070                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1071                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1072                 } else {
1073                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1074                 }
1075
1076                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
1077                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
1078                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1079
1080                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1081                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1082                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1083
1084                 tw32_f(MAC_MODE, mac_mode);
1085                 udelay(100);
1086
1087                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1088                 udelay(10);
1089         }
1090
1091         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1092             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1093              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1094                 u32 base_val;
1095
1096                 base_val = tp->pci_clock_ctrl;
1097                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1098                              CLOCK_CTRL_TXCLK_DISABLE);
1099
1100                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1101                      CLOCK_CTRL_ALTCLK |
1102                      CLOCK_CTRL_PWRDOWN_PLL133);
1103                 udelay(40);
1104         } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1105                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1106                 u32 newbits1, newbits2;
1107
1108                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1109                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1110                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1111                                     CLOCK_CTRL_TXCLK_DISABLE |
1112                                     CLOCK_CTRL_ALTCLK);
1113                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1114                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1115                         newbits1 = CLOCK_CTRL_625_CORE;
1116                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1117                 } else {
1118                         newbits1 = CLOCK_CTRL_ALTCLK;
1119                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1120                 }
1121
1122                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1123                 udelay(40);
1124
1125                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1126                 udelay(40);
1127
1128                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1129                         u32 newbits3;
1130
1131                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1132                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1133                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1134                                             CLOCK_CTRL_TXCLK_DISABLE |
1135                                             CLOCK_CTRL_44MHZ_CORE);
1136                         } else {
1137                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1138                         }
1139
1140                         tw32_f(TG3PCI_CLOCK_CTRL,
1141                                          tp->pci_clock_ctrl | newbits3);
1142                         udelay(40);
1143                 }
1144         }
1145
1146         tg3_frob_aux_power(tp);
1147
1148         /* Workaround for unstable PLL clock */
1149         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1150             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1151                 u32 val = tr32(0x7d00);
1152
1153                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1154                 tw32(0x7d00, val);
1155                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1156                         tg3_halt_cpu(tp, RX_CPU_BASE);
1157         }
1158
1159         /* Finally, set the new power state. */
1160         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1161
1162         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1163
1164         return 0;
1165 }
1166
1167 static void tg3_link_report(struct tg3 *tp)
1168 {
1169         if (!netif_carrier_ok(tp->dev)) {
1170                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1171         } else {
1172                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1173                        tp->dev->name,
1174                        (tp->link_config.active_speed == SPEED_1000 ?
1175                         1000 :
1176                         (tp->link_config.active_speed == SPEED_100 ?
1177                          100 : 10)),
1178                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1179                         "full" : "half"));
1180
1181                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1182                        "%s for RX.\n",
1183                        tp->dev->name,
1184                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1185                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1186         }
1187 }
1188
1189 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1190 {
1191         u32 new_tg3_flags = 0;
1192         u32 old_rx_mode = tp->rx_mode;
1193         u32 old_tx_mode = tp->tx_mode;
1194
1195         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1196                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1197                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1198                                 if (remote_adv & LPA_PAUSE_CAP)
1199                                         new_tg3_flags |=
1200                                                 (TG3_FLAG_RX_PAUSE |
1201                                                 TG3_FLAG_TX_PAUSE);
1202                                 else if (remote_adv & LPA_PAUSE_ASYM)
1203                                         new_tg3_flags |=
1204                                                 (TG3_FLAG_RX_PAUSE);
1205                         } else {
1206                                 if (remote_adv & LPA_PAUSE_CAP)
1207                                         new_tg3_flags |=
1208                                                 (TG3_FLAG_RX_PAUSE |
1209                                                 TG3_FLAG_TX_PAUSE);
1210                         }
1211                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1212                         if ((remote_adv & LPA_PAUSE_CAP) &&
1213                         (remote_adv & LPA_PAUSE_ASYM))
1214                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1215                 }
1216
1217                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1218                 tp->tg3_flags |= new_tg3_flags;
1219         } else {
1220                 new_tg3_flags = tp->tg3_flags;
1221         }
1222
1223         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1224                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1225         else
1226                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1227
1228         if (old_rx_mode != tp->rx_mode) {
1229                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1230         }
1231         
1232         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1233                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1234         else
1235                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1236
1237         if (old_tx_mode != tp->tx_mode) {
1238                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1239         }
1240 }
1241
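/*
 * Illustrative sketch, not used by the driver: the decision tree in
 * tg3_setup_flow_control() above, rewritten as a pure function over the
 * advertisement registers (tg3_resolve_pause_sketch() is not a real tg3
 * function).  For example, a local side advertising
 * ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM against a partner advertising
 * only LPA_PAUSE_ASYM resolves to RX pause only; if both sides advertise the
 * symmetric pause bit, both directions are enabled.
 */
static inline u32 tg3_resolve_pause_sketch(u32 local_adv, u32 remote_adv)
{
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if (remote_adv & LPA_PAUSE_CAP)
                        return TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE;
                if ((local_adv & ADVERTISE_PAUSE_ASYM) &&
                    (remote_adv & LPA_PAUSE_ASYM))
                        return TG3_FLAG_RX_PAUSE;
        } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & LPA_PAUSE_CAP) &&
                    (remote_adv & LPA_PAUSE_ASYM))
                        return TG3_FLAG_TX_PAUSE;
        }
        return 0;
}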
1242 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1243 {
1244         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1245         case MII_TG3_AUX_STAT_10HALF:
1246                 *speed = SPEED_10;
1247                 *duplex = DUPLEX_HALF;
1248                 break;
1249
1250         case MII_TG3_AUX_STAT_10FULL:
1251                 *speed = SPEED_10;
1252                 *duplex = DUPLEX_FULL;
1253                 break;
1254
1255         case MII_TG3_AUX_STAT_100HALF:
1256                 *speed = SPEED_100;
1257                 *duplex = DUPLEX_HALF;
1258                 break;
1259
1260         case MII_TG3_AUX_STAT_100FULL:
1261                 *speed = SPEED_100;
1262                 *duplex = DUPLEX_FULL;
1263                 break;
1264
1265         case MII_TG3_AUX_STAT_1000HALF:
1266                 *speed = SPEED_1000;
1267                 *duplex = DUPLEX_HALF;
1268                 break;
1269
1270         case MII_TG3_AUX_STAT_1000FULL:
1271                 *speed = SPEED_1000;
1272                 *duplex = DUPLEX_FULL;
1273                 break;
1274
1275         default:
1276                 *speed = SPEED_INVALID;
1277                 *duplex = DUPLEX_INVALID;
1278                 break;
1279         };
1280 }
1281
1282 static void tg3_phy_copper_begin(struct tg3 *tp)
1283 {
1284         u32 new_adv;
1285         int i;
1286
1287         if (tp->link_config.phy_is_low_power) {
1288                 /* Entering low power mode.  Disable gigabit and
1289                  * 100baseT advertisements.
1290                  */
1291                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1292
1293                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1294                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1295                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1296                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1297
1298                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1299         } else if (tp->link_config.speed == SPEED_INVALID) {
1300                 tp->link_config.advertising =
1301                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1302                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1303                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1304                          ADVERTISED_Autoneg | ADVERTISED_MII);
1305
1306                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1307                         tp->link_config.advertising &=
1308                                 ~(ADVERTISED_1000baseT_Half |
1309                                   ADVERTISED_1000baseT_Full);
1310
1311                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1312                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1313                         new_adv |= ADVERTISE_10HALF;
1314                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1315                         new_adv |= ADVERTISE_10FULL;
1316                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1317                         new_adv |= ADVERTISE_100HALF;
1318                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1319                         new_adv |= ADVERTISE_100FULL;
1320                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1321
1322                 if (tp->link_config.advertising &
1323                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1324                         new_adv = 0;
1325                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1326                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1327                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1328                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1329                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1330                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1331                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1332                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1333                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1334                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1335                 } else {
1336                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1337                 }
1338         } else {
1339                 /* Asking for a specific link mode. */
1340                 if (tp->link_config.speed == SPEED_1000) {
1341                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1342                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1343
1344                         if (tp->link_config.duplex == DUPLEX_FULL)
1345                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1346                         else
1347                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1348                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1349                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1350                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1351                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1352                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1353                 } else {
1354                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1355
1356                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1357                         if (tp->link_config.speed == SPEED_100) {
1358                                 if (tp->link_config.duplex == DUPLEX_FULL)
1359                                         new_adv |= ADVERTISE_100FULL;
1360                                 else
1361                                         new_adv |= ADVERTISE_100HALF;
1362                         } else {
1363                                 if (tp->link_config.duplex == DUPLEX_FULL)
1364                                         new_adv |= ADVERTISE_10FULL;
1365                                 else
1366                                         new_adv |= ADVERTISE_10HALF;
1367                         }
1368                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1369                 }
1370         }
1371
1372         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1373             tp->link_config.speed != SPEED_INVALID) {
1374                 u32 bmcr, orig_bmcr;
1375
1376                 tp->link_config.active_speed = tp->link_config.speed;
1377                 tp->link_config.active_duplex = tp->link_config.duplex;
1378
1379                 bmcr = 0;
1380                 switch (tp->link_config.speed) {
1381                 default:
1382                 case SPEED_10:
1383                         break;
1384
1385                 case SPEED_100:
1386                         bmcr |= BMCR_SPEED100;
1387                         break;
1388
1389                 case SPEED_1000:
1390                         bmcr |= TG3_BMCR_SPEED1000;
1391                         break;
1392                 }
1393
1394                 if (tp->link_config.duplex == DUPLEX_FULL)
1395                         bmcr |= BMCR_FULLDPLX;
1396
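                /* When forcing a new mode, drop the link first by putting
                 * the PHY into loopback, wait up to ~15ms for BMSR to
                 * report link-down, and only then program the new forced
                 * speed/duplex settings.
                 */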
1397                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1398                     (bmcr != orig_bmcr)) {
1399                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1400                         for (i = 0; i < 1500; i++) {
1401                                 u32 tmp;
1402
1403                                 udelay(10);
1404                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1405                                     tg3_readphy(tp, MII_BMSR, &tmp))
1406                                         continue;
1407                                 if (!(tmp & BMSR_LSTATUS)) {
1408                                         udelay(40);
1409                                         break;
1410                                 }
1411                         }
1412                         tg3_writephy(tp, MII_BMCR, bmcr);
1413                         udelay(40);
1414                 }
1415         } else {
1416                 tg3_writephy(tp, MII_BMCR,
1417                              BMCR_ANENABLE | BMCR_ANRESTART);
1418         }
1419 }
1420
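/* Program the BCM5401 DSP through the address/data register pair to
 * turn off tap power management and set the extended packet length
 * bit.  Returns non-zero if any of the PHY writes fail.
 */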
1421 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1422 {
1423         int err;
1424
1425         /* Turn off tap power management. */
1426         /* Set Extended packet length bit */
1427         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1428
1429         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1430         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1431
1432         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1433         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1434
1435         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1436         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1437
1438         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1439         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1440
1441         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1442         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1443
1444         udelay(40);
1445
1446         return err;
1447 }
1448
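/* Return 1 if the PHY is currently advertising the full 10/100 (and,
 * unless the device is 10/100-only, 1000) ability set, 0 otherwise.
 * Used by tg3_setup_copper_phy to decide whether autoneg must be
 * restarted when exiting low power mode.
 */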
1449 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1450 {
1451         u32 adv_reg, all_mask;
1452
1453         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1454                 return 0;
1455
1456         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1457                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1458         if ((adv_reg & all_mask) != all_mask)
1459                 return 0;
1460         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1461                 u32 tg3_ctrl;
1462
1463                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1464                         return 0;
1465
1466                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1467                             MII_TG3_CTRL_ADV_1000_FULL);
1468                 if ((tg3_ctrl & all_mask) != all_mask)
1469                         return 0;
1470         }
1471         return 1;
1472 }
1473
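/* Bring up (or re-check) the link on a copper PHY: clear stale MAC
 * status and events, optionally reset the PHY, apply the 5401 DSP and
 * 5701 A0/B0 workarounds, poll BMSR for link, derive speed/duplex from
 * the AUX_STAT register, verify the pause advertisement, and finally
 * program MAC_MODE and update the carrier state.
 */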
1474 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1475 {
1476         int current_link_up;
1477         u32 bmsr, dummy;
1478         u16 current_speed;
1479         u8 current_duplex;
1480         int i, err;
1481
1482         tw32(MAC_EVENT, 0);
1483
1484         tw32_f(MAC_STATUS,
1485              (MAC_STATUS_SYNC_CHANGED |
1486               MAC_STATUS_CFG_CHANGED |
1487               MAC_STATUS_MI_COMPLETION |
1488               MAC_STATUS_LNKSTATE_CHANGED));
1489         udelay(40);
1490
1491         tp->mi_mode = MAC_MI_MODE_BASE;
1492         tw32_f(MAC_MI_MODE, tp->mi_mode);
1493         udelay(80);
1494
1495         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1496
1497         /* Some third-party PHYs need to be reset on link going
1498          * down.
1499          */
1500         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1501              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1502              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1503             netif_carrier_ok(tp->dev)) {
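                /* BMSR latches link-down events, so read it twice; the
                 * second read reflects the current link state.
                 */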
1504                 tg3_readphy(tp, MII_BMSR, &bmsr);
1505                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1506                     !(bmsr & BMSR_LSTATUS))
1507                         force_reset = 1;
1508         }
1509         if (force_reset)
1510                 tg3_phy_reset(tp);
1511
1512         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1513                 tg3_readphy(tp, MII_BMSR, &bmsr);
1514                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1515                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1516                         bmsr = 0;
1517
1518                 if (!(bmsr & BMSR_LSTATUS)) {
1519                         err = tg3_init_5401phy_dsp(tp);
1520                         if (err)
1521                                 return err;
1522
1523                         tg3_readphy(tp, MII_BMSR, &bmsr);
1524                         for (i = 0; i < 1000; i++) {
1525                                 udelay(10);
1526                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1527                                     (bmsr & BMSR_LSTATUS)) {
1528                                         udelay(40);
1529                                         break;
1530                                 }
1531                         }
1532
1533                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1534                             !(bmsr & BMSR_LSTATUS) &&
1535                             tp->link_config.active_speed == SPEED_1000) {
1536                                 err = tg3_phy_reset(tp);
1537                                 if (!err)
1538                                         err = tg3_init_5401phy_dsp(tp);
1539                                 if (err)
1540                                         return err;
1541                         }
1542                 }
1543         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1544                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1545                 /* 5701 {A0,B0} CRC bug workaround */
1546                 tg3_writephy(tp, 0x15, 0x0a75);
1547                 tg3_writephy(tp, 0x1c, 0x8c68);
1548                 tg3_writephy(tp, 0x1c, 0x8d68);
1549                 tg3_writephy(tp, 0x1c, 0x8c68);
1550         }
1551
1552         /* Clear pending interrupts... */
1553         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1554         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1555
1556         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1557                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1558         else
1559                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1560
1561         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1562             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1563                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1564                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1565                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1566                 else
1567                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1568         }
1569
1570         current_link_up = 0;
1571         current_speed = SPEED_INVALID;
1572         current_duplex = DUPLEX_INVALID;
1573
1574         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1575                 u32 val;
1576
1577                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1578                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1579                 if (!(val & (1 << 10))) {
1580                         val |= (1 << 10);
1581                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1582                         goto relink;
1583                 }
1584         }
1585
1586         bmsr = 0;
1587         for (i = 0; i < 100; i++) {
1588                 tg3_readphy(tp, MII_BMSR, &bmsr);
1589                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1590                     (bmsr & BMSR_LSTATUS))
1591                         break;
1592                 udelay(40);
1593         }
1594
1595         if (bmsr & BMSR_LSTATUS) {
1596                 u32 aux_stat, bmcr;
1597
1598                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1599                 for (i = 0; i < 2000; i++) {
1600                         udelay(10);
1601                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1602                             aux_stat)
1603                                 break;
1604                 }
1605
1606                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1607                                              &current_speed,
1608                                              &current_duplex);
1609
1610                 bmcr = 0;
1611                 for (i = 0; i < 200; i++) {
1612                         tg3_readphy(tp, MII_BMCR, &bmcr);
1613                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1614                                 continue;
1615                         if (bmcr && bmcr != 0x7fff)
1616                                 break;
1617                         udelay(10);
1618                 }
1619
1620                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1621                         if (bmcr & BMCR_ANENABLE) {
1622                                 current_link_up = 1;
1623
1624                                 /* Force autoneg restart if we are exiting
1625                                  * low power mode.
1626                                  */
1627                                 if (!tg3_copper_is_advertising_all(tp))
1628                                         current_link_up = 0;
1629                         } else {
1630                                 current_link_up = 0;
1631                         }
1632                 } else {
1633                         if (!(bmcr & BMCR_ANENABLE) &&
1634                             tp->link_config.speed == current_speed &&
1635                             tp->link_config.duplex == current_duplex) {
1636                                 current_link_up = 1;
1637                         } else {
1638                                 current_link_up = 0;
1639                         }
1640                 }
1641
1642                 tp->link_config.active_speed = current_speed;
1643                 tp->link_config.active_duplex = current_duplex;
1644         }
1645
1646         if (current_link_up == 1 &&
1647             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1648             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1649                 u32 local_adv, remote_adv;
1650
1651                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1652                         local_adv = 0;
1653                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1654
1655                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1656                         remote_adv = 0;
1657
1658                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1659
1660                 /* If we are not advertising full pause capability,
1661                  * something is wrong.  Bring the link down and reconfigure.
1662                  */
1663                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1664                         current_link_up = 0;
1665                 } else {
1666                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1667                 }
1668         }
1669 relink:
1670         if (current_link_up == 0) {
1671                 u32 tmp;
1672
1673                 tg3_phy_copper_begin(tp);
1674
1675                 tg3_readphy(tp, MII_BMSR, &tmp);
1676                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1677                     (tmp & BMSR_LSTATUS))
1678                         current_link_up = 1;
1679         }
1680
1681         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1682         if (current_link_up == 1) {
1683                 if (tp->link_config.active_speed == SPEED_100 ||
1684                     tp->link_config.active_speed == SPEED_10)
1685                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1686                 else
1687                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1688         } else
1689                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1690
1691         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1692         if (tp->link_config.active_duplex == DUPLEX_HALF)
1693                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1694
1695         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1696         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1697                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1698                     (current_link_up == 1 &&
1699                      tp->link_config.active_speed == SPEED_10))
1700                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1701         } else {
1702                 if (current_link_up == 1)
1703                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1704         }
1705
1706         /* ??? Without this setting Netgear GA302T PHY does not
1707          * ??? send/receive packets...
1708          */
1709         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1710             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1711                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1712                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1713                 udelay(80);
1714         }
1715
1716         tw32_f(MAC_MODE, tp->mac_mode);
1717         udelay(40);
1718
1719         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1720                 /* Polled via timer. */
1721                 tw32_f(MAC_EVENT, 0);
1722         } else {
1723                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1724         }
1725         udelay(40);
1726
1727         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1728             current_link_up == 1 &&
1729             tp->link_config.active_speed == SPEED_1000 &&
1730             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1731              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1732                 udelay(120);
1733                 tw32_f(MAC_STATUS,
1734                      (MAC_STATUS_SYNC_CHANGED |
1735                       MAC_STATUS_CFG_CHANGED));
1736                 udelay(40);
1737                 tg3_write_mem(tp,
1738                               NIC_SRAM_FIRMWARE_MBOX,
1739                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1740         }
1741
1742         if (current_link_up != netif_carrier_ok(tp->dev)) {
1743                 if (current_link_up)
1744                         netif_carrier_on(tp->dev);
1745                 else
1746                         netif_carrier_off(tp->dev);
1747                 tg3_link_report(tp);
1748         }
1749
1750         return 0;
1751 }
1752
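/* Software 1000BASE-X autoneg arbitration state machine (in the style
 * of IEEE 802.3 clause 37).  The ANEG_STATE_* values track where we
 * are in the exchange of config words, and the MR_* flags record what
 * the link partner advertised.
 */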
1753 struct tg3_fiber_aneginfo {
1754         int state;
1755 #define ANEG_STATE_UNKNOWN              0
1756 #define ANEG_STATE_AN_ENABLE            1
1757 #define ANEG_STATE_RESTART_INIT         2
1758 #define ANEG_STATE_RESTART              3
1759 #define ANEG_STATE_DISABLE_LINK_OK      4
1760 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1761 #define ANEG_STATE_ABILITY_DETECT       6
1762 #define ANEG_STATE_ACK_DETECT_INIT      7
1763 #define ANEG_STATE_ACK_DETECT           8
1764 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1765 #define ANEG_STATE_COMPLETE_ACK         10
1766 #define ANEG_STATE_IDLE_DETECT_INIT     11
1767 #define ANEG_STATE_IDLE_DETECT          12
1768 #define ANEG_STATE_LINK_OK              13
1769 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1770 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1771
1772         u32 flags;
1773 #define MR_AN_ENABLE            0x00000001
1774 #define MR_RESTART_AN           0x00000002
1775 #define MR_AN_COMPLETE          0x00000004
1776 #define MR_PAGE_RX              0x00000008
1777 #define MR_NP_LOADED            0x00000010
1778 #define MR_TOGGLE_TX            0x00000020
1779 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1780 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1781 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1782 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1783 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1784 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1785 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1786 #define MR_TOGGLE_RX            0x00002000
1787 #define MR_NP_RX                0x00004000
1788
1789 #define MR_LINK_OK              0x80000000
1790
1791         unsigned long link_time, cur_time;
1792
1793         u32 ability_match_cfg;
1794         int ability_match_count;
1795
1796         char ability_match, idle_match, ack_match;
1797
1798         u32 txconfig, rxconfig;
1799 #define ANEG_CFG_NP             0x00000080
1800 #define ANEG_CFG_ACK            0x00000040
1801 #define ANEG_CFG_RF2            0x00000020
1802 #define ANEG_CFG_RF1            0x00000010
1803 #define ANEG_CFG_PS2            0x00000001
1804 #define ANEG_CFG_PS1            0x00008000
1805 #define ANEG_CFG_HD             0x00004000
1806 #define ANEG_CFG_FD             0x00002000
1807 #define ANEG_CFG_INVAL          0x00001f06
1808
1809 };
1810 #define ANEG_OK         0
1811 #define ANEG_DONE       1
1812 #define ANEG_TIMER_ENAB 2
1813 #define ANEG_FAILED     -1
1814
1815 #define ANEG_STATE_SETTLE_TIME  10000
1816
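/* Advance the fiber autoneg state machine by one tick: sample the
 * incoming config word (MAC_RX_AUTO_NEG) when one is being received,
 * update the ability/ack/idle match tracking, then run one state
 * transition.  Returns ANEG_OK, ANEG_TIMER_ENAB, ANEG_DONE or
 * ANEG_FAILED.
 */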
1817 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1818                                    struct tg3_fiber_aneginfo *ap)
1819 {
1820         unsigned long delta;
1821         u32 rx_cfg_reg;
1822         int ret;
1823
1824         if (ap->state == ANEG_STATE_UNKNOWN) {
1825                 ap->rxconfig = 0;
1826                 ap->link_time = 0;
1827                 ap->cur_time = 0;
1828                 ap->ability_match_cfg = 0;
1829                 ap->ability_match_count = 0;
1830                 ap->ability_match = 0;
1831                 ap->idle_match = 0;
1832                 ap->ack_match = 0;
1833         }
1834         ap->cur_time++;
1835
1836         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1837                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1838
1839                 if (rx_cfg_reg != ap->ability_match_cfg) {
1840                         ap->ability_match_cfg = rx_cfg_reg;
1841                         ap->ability_match = 0;
1842                         ap->ability_match_count = 0;
1843                 } else {
1844                         if (++ap->ability_match_count > 1) {
1845                                 ap->ability_match = 1;
1846                                 ap->ability_match_cfg = rx_cfg_reg;
1847                         }
1848                 }
1849                 if (rx_cfg_reg & ANEG_CFG_ACK)
1850                         ap->ack_match = 1;
1851                 else
1852                         ap->ack_match = 0;
1853
1854                 ap->idle_match = 0;
1855         } else {
1856                 ap->idle_match = 1;
1857                 ap->ability_match_cfg = 0;
1858                 ap->ability_match_count = 0;
1859                 ap->ability_match = 0;
1860                 ap->ack_match = 0;
1861
1862                 rx_cfg_reg = 0;
1863         }
1864
1865         ap->rxconfig = rx_cfg_reg;
1866         ret = ANEG_OK;
1867
1868         switch(ap->state) {
1869         case ANEG_STATE_UNKNOWN:
1870                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1871                         ap->state = ANEG_STATE_AN_ENABLE;
1872
1873                 /* fallthru */
1874         case ANEG_STATE_AN_ENABLE:
1875                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1876                 if (ap->flags & MR_AN_ENABLE) {
1877                         ap->link_time = 0;
1878                         ap->cur_time = 0;
1879                         ap->ability_match_cfg = 0;
1880                         ap->ability_match_count = 0;
1881                         ap->ability_match = 0;
1882                         ap->idle_match = 0;
1883                         ap->ack_match = 0;
1884
1885                         ap->state = ANEG_STATE_RESTART_INIT;
1886                 } else {
1887                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1888                 }
1889                 break;
1890
1891         case ANEG_STATE_RESTART_INIT:
1892                 ap->link_time = ap->cur_time;
1893                 ap->flags &= ~(MR_NP_LOADED);
1894                 ap->txconfig = 0;
1895                 tw32(MAC_TX_AUTO_NEG, 0);
1896                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1897                 tw32_f(MAC_MODE, tp->mac_mode);
1898                 udelay(40);
1899
1900                 ret = ANEG_TIMER_ENAB;
1901                 ap->state = ANEG_STATE_RESTART;
1902
1903                 /* fallthru */
1904         case ANEG_STATE_RESTART:
1905                 delta = ap->cur_time - ap->link_time;
1906                 if (delta > ANEG_STATE_SETTLE_TIME) {
1907                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1908                 } else {
1909                         ret = ANEG_TIMER_ENAB;
1910                 }
1911                 break;
1912
1913         case ANEG_STATE_DISABLE_LINK_OK:
1914                 ret = ANEG_DONE;
1915                 break;
1916
1917         case ANEG_STATE_ABILITY_DETECT_INIT:
1918                 ap->flags &= ~(MR_TOGGLE_TX);
1919                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1920                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1921                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1922                 tw32_f(MAC_MODE, tp->mac_mode);
1923                 udelay(40);
1924
1925                 ap->state = ANEG_STATE_ABILITY_DETECT;
1926                 break;
1927
1928         case ANEG_STATE_ABILITY_DETECT:
1929                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1930                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1931                 }
1932                 break;
1933
1934         case ANEG_STATE_ACK_DETECT_INIT:
1935                 ap->txconfig |= ANEG_CFG_ACK;
1936                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1937                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1938                 tw32_f(MAC_MODE, tp->mac_mode);
1939                 udelay(40);
1940
1941                 ap->state = ANEG_STATE_ACK_DETECT;
1942
1943                 /* fallthru */
1944         case ANEG_STATE_ACK_DETECT:
1945                 if (ap->ack_match != 0) {
1946                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1947                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1948                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1949                         } else {
1950                                 ap->state = ANEG_STATE_AN_ENABLE;
1951                         }
1952                 } else if (ap->ability_match != 0 &&
1953                            ap->rxconfig == 0) {
1954                         ap->state = ANEG_STATE_AN_ENABLE;
1955                 }
1956                 break;
1957
1958         case ANEG_STATE_COMPLETE_ACK_INIT:
1959                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1960                         ret = ANEG_FAILED;
1961                         break;
1962                 }
1963                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1964                                MR_LP_ADV_HALF_DUPLEX |
1965                                MR_LP_ADV_SYM_PAUSE |
1966                                MR_LP_ADV_ASYM_PAUSE |
1967                                MR_LP_ADV_REMOTE_FAULT1 |
1968                                MR_LP_ADV_REMOTE_FAULT2 |
1969                                MR_LP_ADV_NEXT_PAGE |
1970                                MR_TOGGLE_RX |
1971                                MR_NP_RX);
1972                 if (ap->rxconfig & ANEG_CFG_FD)
1973                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1974                 if (ap->rxconfig & ANEG_CFG_HD)
1975                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1976                 if (ap->rxconfig & ANEG_CFG_PS1)
1977                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1978                 if (ap->rxconfig & ANEG_CFG_PS2)
1979                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1980                 if (ap->rxconfig & ANEG_CFG_RF1)
1981                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1982                 if (ap->rxconfig & ANEG_CFG_RF2)
1983                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1984                 if (ap->rxconfig & ANEG_CFG_NP)
1985                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1986
1987                 ap->link_time = ap->cur_time;
1988
1989                 ap->flags ^= (MR_TOGGLE_TX);
1990                 if (ap->rxconfig & 0x0008)
1991                         ap->flags |= MR_TOGGLE_RX;
1992                 if (ap->rxconfig & ANEG_CFG_NP)
1993                         ap->flags |= MR_NP_RX;
1994                 ap->flags |= MR_PAGE_RX;
1995
1996                 ap->state = ANEG_STATE_COMPLETE_ACK;
1997                 ret = ANEG_TIMER_ENAB;
1998                 break;
1999
2000         case ANEG_STATE_COMPLETE_ACK:
2001                 if (ap->ability_match != 0 &&
2002                     ap->rxconfig == 0) {
2003                         ap->state = ANEG_STATE_AN_ENABLE;
2004                         break;
2005                 }
2006                 delta = ap->cur_time - ap->link_time;
2007                 if (delta > ANEG_STATE_SETTLE_TIME) {
2008                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2009                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2010                         } else {
2011                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2012                                     !(ap->flags & MR_NP_RX)) {
2013                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2014                                 } else {
2015                                         ret = ANEG_FAILED;
2016                                 }
2017                         }
2018                 }
2019                 break;
2020
2021         case ANEG_STATE_IDLE_DETECT_INIT:
2022                 ap->link_time = ap->cur_time;
2023                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2024                 tw32_f(MAC_MODE, tp->mac_mode);
2025                 udelay(40);
2026
2027                 ap->state = ANEG_STATE_IDLE_DETECT;
2028                 ret = ANEG_TIMER_ENAB;
2029                 break;
2030
2031         case ANEG_STATE_IDLE_DETECT:
2032                 if (ap->ability_match != 0 &&
2033                     ap->rxconfig == 0) {
2034                         ap->state = ANEG_STATE_AN_ENABLE;
2035                         break;
2036                 }
2037                 delta = ap->cur_time - ap->link_time;
2038                 if (delta > ANEG_STATE_SETTLE_TIME) {
2039                         /* XXX another gem from the Broadcom driver :( */
2040                         ap->state = ANEG_STATE_LINK_OK;
2041                 }
2042                 break;
2043
2044         case ANEG_STATE_LINK_OK:
2045                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2046                 ret = ANEG_DONE;
2047                 break;
2048
2049         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2050                 /* ??? unimplemented */
2051                 break;
2052
2053         case ANEG_STATE_NEXT_PAGE_WAIT:
2054                 /* ??? unimplemented */
2055                 break;
2056
2057         default:
2058                 ret = ANEG_FAILED;
2059                 break;
2060         }
2061
2062         return ret;
2063 }
2064
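/* Run the autoneg state machine to completion, ticking it once per
 * microsecond for at most ~195ms.  On return *flags holds the MR_*
 * bits that were gathered; the result is 1 only if the machine
 * finished (ANEG_DONE) and at least one of MR_AN_COMPLETE, MR_LINK_OK
 * or MR_LP_ADV_FULL_DUPLEX is set.
 */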
2065 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2066 {
2067         int res = 0;
2068         struct tg3_fiber_aneginfo aninfo;
2069         int status = ANEG_FAILED;
2070         unsigned int tick;
2071         u32 tmp;
2072
2073         tw32_f(MAC_TX_AUTO_NEG, 0);
2074
2075         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2076         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2077         udelay(40);
2078
2079         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2080         udelay(40);
2081
2082         memset(&aninfo, 0, sizeof(aninfo));
2083         aninfo.flags |= MR_AN_ENABLE;
2084         aninfo.state = ANEG_STATE_UNKNOWN;
2085         aninfo.cur_time = 0;
2086         tick = 0;
2087         while (++tick < 195000) {
2088                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2089                 if (status == ANEG_DONE || status == ANEG_FAILED)
2090                         break;
2091
2092                 udelay(1);
2093         }
2094
2095         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2096         tw32_f(MAC_MODE, tp->mac_mode);
2097         udelay(40);
2098
2099         *flags = aninfo.flags;
2100
2101         if (status == ANEG_DONE &&
2102             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2103                              MR_LP_ADV_FULL_DUPLEX)))
2104                 res = 1;
2105
2106         return res;
2107 }
2108
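/* One-time setup for the BCM8002 SerDes PHY: software-reset it and
 * program the PLL lock range, comdet selection and POR sequencing
 * through its vendor-specific registers.  Skipped when the device has
 * already completed init and the PCS is not currently synced.
 */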
2109 static void tg3_init_bcm8002(struct tg3 *tp)
2110 {
2111         u32 mac_status = tr32(MAC_STATUS);
2112         int i;
2113
2114         /* Reset only on first-time init or when we currently have a link. */
2115         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2116             !(mac_status & MAC_STATUS_PCS_SYNCED))
2117                 return;
2118
2119         /* Set PLL lock range. */
2120         tg3_writephy(tp, 0x16, 0x8007);
2121
2122         /* SW reset */
2123         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2124
2125         /* Wait for reset to complete. */
2126         /* XXX schedule_timeout() ... */
2127         for (i = 0; i < 500; i++)
2128                 udelay(10);
2129
2130         /* Config mode; select PMA/Ch 1 regs. */
2131         tg3_writephy(tp, 0x10, 0x8411);
2132
2133         /* Enable auto-lock and comdet, select txclk for tx. */
2134         tg3_writephy(tp, 0x11, 0x0a10);
2135
2136         tg3_writephy(tp, 0x18, 0x00a0);
2137         tg3_writephy(tp, 0x16, 0x41ff);
2138
2139         /* Assert and deassert POR. */
2140         tg3_writephy(tp, 0x13, 0x0400);
2141         udelay(40);
2142         tg3_writephy(tp, 0x13, 0x0000);
2143
2144         tg3_writephy(tp, 0x11, 0x0a50);
2145         udelay(40);
2146         tg3_writephy(tp, 0x11, 0x0a10);
2147
2148         /* Wait for signal to stabilize */
2149         /* XXX schedule_timeout() ... */
2150         for (i = 0; i < 15000; i++)
2151                 udelay(10);
2152
2153         /* Deselect the channel register so we can read the PHYID
2154          * later.
2155          */
2156         tg3_writephy(tp, 0x10, 0x8011);
2157 }
2158
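/* Fiber link setup using the on-chip SG_DIG autoneg block.  On chips
 * other than 5704 A0/A1, MAC_SERDES_CFG is rewritten around the
 * SG_DIG_CTRL changes as a workaround.  When SG_DIG_CTRL already holds
 * the expected value and the PCS shows sync or signal detect, give
 * negotiation ~200ms to finish and fall back to parallel detection if
 * the partner never completes.  Returns 1 if the link came up.
 */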
2159 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2160 {
2161         u32 sg_dig_ctrl, sg_dig_status;
2162         u32 serdes_cfg, expected_sg_dig_ctrl;
2163         int workaround, port_a;
2164         int current_link_up;
2165
2166         serdes_cfg = 0;
2167         expected_sg_dig_ctrl = 0;
2168         workaround = 0;
2169         port_a = 1;
2170         current_link_up = 0;
2171
2172         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2173             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2174                 workaround = 1;
2175                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2176                         port_a = 0;
2177
2178                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2179                 /* preserve bits 20-23 for voltage regulator */
2180                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2181         }
2182
2183         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2184
2185         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2186                 if (sg_dig_ctrl & (1 << 31)) {
2187                         if (workaround) {
2188                                 u32 val = serdes_cfg;
2189
2190                                 if (port_a)
2191                                         val |= 0xc010000;
2192                                 else
2193                                         val |= 0x4010000;
2194                                 tw32_f(MAC_SERDES_CFG, val);
2195                         }
2196                         tw32_f(SG_DIG_CTRL, 0x01388400);
2197                 }
2198                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2199                         tg3_setup_flow_control(tp, 0, 0);
2200                         current_link_up = 1;
2201                 }
2202                 goto out;
2203         }
2204
2205         /* Want auto-negotiation.  */
2206         expected_sg_dig_ctrl = 0x81388400;
2207
2208         /* Pause capability */
2209         expected_sg_dig_ctrl |= (1 << 11);
2210
2211         /* Asymmetric pause */
2212         expected_sg_dig_ctrl |= (1 << 12);
2213
2214         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2215                 if (workaround)
2216                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2217                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2218                 udelay(5);
2219                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2220
2221                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2222         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2223                                  MAC_STATUS_SIGNAL_DET)) {
2224                 int i;
2225
2226                 /* Give it time to negotiate (~200ms) */
2227                 for (i = 0; i < 40000; i++) {
2228                         sg_dig_status = tr32(SG_DIG_STATUS);
2229                         if (sg_dig_status & (0x3))
2230                                 break;
2231                         udelay(5);
2232                 }
2233                 mac_status = tr32(MAC_STATUS);
2234
2235                 if ((sg_dig_status & (1 << 1)) &&
2236                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2237                         u32 local_adv, remote_adv;
2238
2239                         local_adv = ADVERTISE_PAUSE_CAP;
2240                         remote_adv = 0;
2241                         if (sg_dig_status & (1 << 19))
2242                                 remote_adv |= LPA_PAUSE_CAP;
2243                         if (sg_dig_status & (1 << 20))
2244                                 remote_adv |= LPA_PAUSE_ASYM;
2245
2246                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2247                         current_link_up = 1;
2248                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2249                 } else if (!(sg_dig_status & (1 << 1))) {
2250                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2251                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2252                         else {
2253                                 if (workaround) {
2254                                         u32 val = serdes_cfg;
2255
2256                                         if (port_a)
2257                                                 val |= 0xc010000;
2258                                         else
2259                                                 val |= 0x4010000;
2260
2261                                         tw32_f(MAC_SERDES_CFG, val);
2262                                 }
2263
2264                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2265                                 udelay(40);
2266
2267                                 /* Parallel detection: the link is up
2268                                  * only if we have PCS_SYNC and are not
2269                                  * receiving config code words. */
2270                                 mac_status = tr32(MAC_STATUS);
2271                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2272                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2273                                         tg3_setup_flow_control(tp, 0, 0);
2274                                         current_link_up = 1;
2275                                 }
2276                         }
2277                 }
2278         }
2279
2280 out:
2281         return current_link_up;
2282 }
2283
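/* Fiber link setup without the hardware SG_DIG block: run the software
 * autoneg state machine when autoneg is enabled, otherwise simply force
 * a 1000FD link.  Returns 1 if the link came up.
 */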
2284 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2285 {
2286         int current_link_up = 0;
2287
2288         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2289                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2290                 goto out;
2291         }
2292
2293         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2294                 u32 flags;
2295                 int i;
2296   
2297                 if (fiber_autoneg(tp, &flags)) {
2298                         u32 local_adv, remote_adv;
2299
2300                         local_adv = ADVERTISE_PAUSE_CAP;
2301                         remote_adv = 0;
2302                         if (flags & MR_LP_ADV_SYM_PAUSE)
2303                                 remote_adv |= LPA_PAUSE_CAP;
2304                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2305                                 remote_adv |= LPA_PAUSE_ASYM;
2306
2307                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2308
2309                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2310                         current_link_up = 1;
2311                 }
2312                 for (i = 0; i < 30; i++) {
2313                         udelay(20);
2314                         tw32_f(MAC_STATUS,
2315                                (MAC_STATUS_SYNC_CHANGED |
2316                                 MAC_STATUS_CFG_CHANGED));
2317                         udelay(40);
2318                         if ((tr32(MAC_STATUS) &
2319                              (MAC_STATUS_SYNC_CHANGED |
2320                               MAC_STATUS_CFG_CHANGED)) == 0)
2321                                 break;
2322                 }
2323
2324                 mac_status = tr32(MAC_STATUS);
2325                 if (current_link_up == 0 &&
2326                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2327                     !(mac_status & MAC_STATUS_RCVD_CFG))
2328                         current_link_up = 1;
2329         } else {
2330                 /* Forcing 1000FD link up. */
2331                 current_link_up = 1;
2332                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2333
2334                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2335                 udelay(40);
2336         }
2337
2338 out:
2339         return current_link_up;
2340 }
2341
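/* Top-level link setup for SerDes/fiber devices: put the MAC into TBI
 * mode, run either hardware or software autoneg, then update the LED
 * controls and carrier state, reporting any change in speed, duplex or
 * pause configuration.
 */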
2342 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2343 {
2344         u32 orig_pause_cfg;
2345         u16 orig_active_speed;
2346         u8 orig_active_duplex;
2347         u32 mac_status;
2348         int current_link_up;
2349         int i;
2350
2351         orig_pause_cfg =
2352                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2353                                   TG3_FLAG_TX_PAUSE));
2354         orig_active_speed = tp->link_config.active_speed;
2355         orig_active_duplex = tp->link_config.active_duplex;
2356
2357         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2358             netif_carrier_ok(tp->dev) &&
2359             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2360                 mac_status = tr32(MAC_STATUS);
2361                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2362                                MAC_STATUS_SIGNAL_DET |
2363                                MAC_STATUS_CFG_CHANGED |
2364                                MAC_STATUS_RCVD_CFG);
2365                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2366                                    MAC_STATUS_SIGNAL_DET)) {
2367                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2368                                             MAC_STATUS_CFG_CHANGED));
2369                         return 0;
2370                 }
2371         }
2372
2373         tw32_f(MAC_TX_AUTO_NEG, 0);
2374
2375         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2376         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2377         tw32_f(MAC_MODE, tp->mac_mode);
2378         udelay(40);
2379
2380         if (tp->phy_id == PHY_ID_BCM8002)
2381                 tg3_init_bcm8002(tp);
2382
2383         /* Enable link change event even when serdes polling.  */
2384         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2385         udelay(40);
2386
2387         current_link_up = 0;
2388         mac_status = tr32(MAC_STATUS);
2389
2390         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2391                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2392         else
2393                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2394
2395         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2396         tw32_f(MAC_MODE, tp->mac_mode);
2397         udelay(40);
2398
2399         tp->hw_status->status =
2400                 (SD_STATUS_UPDATED |
2401                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2402
2403         for (i = 0; i < 100; i++) {
2404                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2405                                     MAC_STATUS_CFG_CHANGED));
2406                 udelay(5);
2407                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2408                                          MAC_STATUS_CFG_CHANGED)) == 0)
2409                         break;
2410         }
2411
2412         mac_status = tr32(MAC_STATUS);
2413         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2414                 current_link_up = 0;
2415                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2416                         tw32_f(MAC_MODE, (tp->mac_mode |
2417                                           MAC_MODE_SEND_CONFIGS));
2418                         udelay(1);
2419                         tw32_f(MAC_MODE, tp->mac_mode);
2420                 }
2421         }
2422
2423         if (current_link_up == 1) {
2424                 tp->link_config.active_speed = SPEED_1000;
2425                 tp->link_config.active_duplex = DUPLEX_FULL;
2426                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2427                                     LED_CTRL_LNKLED_OVERRIDE |
2428                                     LED_CTRL_1000MBPS_ON));
2429         } else {
2430                 tp->link_config.active_speed = SPEED_INVALID;
2431                 tp->link_config.active_duplex = DUPLEX_INVALID;
2432                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2433                                     LED_CTRL_LNKLED_OVERRIDE |
2434                                     LED_CTRL_TRAFFIC_OVERRIDE));
2435         }
2436
2437         if (current_link_up != netif_carrier_ok(tp->dev)) {
2438                 if (current_link_up)
2439                         netif_carrier_on(tp->dev);
2440                 else
2441                         netif_carrier_off(tp->dev);
2442                 tg3_link_report(tp);
2443         } else {
2444                 u32 now_pause_cfg =
2445                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2446                                          TG3_FLAG_TX_PAUSE);
2447                 if (orig_pause_cfg != now_pause_cfg ||
2448                     orig_active_speed != tp->link_config.active_speed ||
2449                     orig_active_duplex != tp->link_config.active_duplex)
2450                         tg3_link_report(tp);
2451         }
2452
2453         return 0;
2454 }
2455
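/* Common entry point for link setup: dispatch to the fiber or copper
 * path, then adjust the transmit slot time for half-duplex gigabit
 * and, on pre-5705 chips, enable statistics coalescing only while the
 * carrier is up.
 */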
2456 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2457 {
2458         int err;
2459
2460         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2461                 err = tg3_setup_fiber_phy(tp, force_reset);
2462         } else {
2463                 err = tg3_setup_copper_phy(tp, force_reset);
2464         }
2465
2466         if (tp->link_config.active_speed == SPEED_1000 &&
2467             tp->link_config.active_duplex == DUPLEX_HALF)
2468                 tw32(MAC_TX_LENGTHS,
2469                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2470                       (6 << TX_LENGTHS_IPG_SHIFT) |
2471                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2472         else
2473                 tw32(MAC_TX_LENGTHS,
2474                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2475                       (6 << TX_LENGTHS_IPG_SHIFT) |
2476                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2477
2478         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2479                 if (netif_carrier_ok(tp->dev)) {
2480                         tw32(HOSTCC_STAT_COAL_TICKS,
2481                              DEFAULT_STAT_COAL_TICKS);
2482                 } else {
2483                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2484                 }
2485         }
2486
2487         return err;
2488 }
2489
2490 /* Tigon3 never reports partial packet sends.  So we do not
2491  * need special logic to handle SKBs that have not had all
2492  * of their frags sent yet, like SunGEM does.
2493  */
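/* Reclaim completed transmit descriptors: walk from tx_cons up to the
 * consumer index reported in the status block, unmap the linear part
 * and every fragment, free the skb, and wake the queue once enough
 * descriptors are available again.
 */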
2494 static void tg3_tx(struct tg3 *tp)
2495 {
2496         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2497         u32 sw_idx = tp->tx_cons;
2498
2499         while (sw_idx != hw_idx) {
2500                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2501                 struct sk_buff *skb = ri->skb;
2502                 int i;
2503
2504                 if (unlikely(skb == NULL))
2505                         BUG();
2506
2507                 pci_unmap_single(tp->pdev,
2508                                  pci_unmap_addr(ri, mapping),
2509                                  skb_headlen(skb),
2510                                  PCI_DMA_TODEVICE);
2511
2512                 ri->skb = NULL;
2513
2514                 sw_idx = NEXT_TX(sw_idx);
2515
2516                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2517                         if (unlikely(sw_idx == hw_idx))
2518                                 BUG();
2519
2520                         ri = &tp->tx_buffers[sw_idx];
2521                         if (unlikely(ri->skb != NULL))
2522                                 BUG();
2523
2524                         pci_unmap_page(tp->pdev,
2525                                        pci_unmap_addr(ri, mapping),
2526                                        skb_shinfo(skb)->frags[i].size,
2527                                        PCI_DMA_TODEVICE);
2528
2529                         sw_idx = NEXT_TX(sw_idx);
2530                 }
2531
2532                 dev_kfree_skb_irq(skb);
2533         }
2534
2535         tp->tx_cons = sw_idx;
2536
2537         if (netif_queue_stopped(tp->dev) &&
2538             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2539                 netif_wake_queue(tp->dev);
2540 }
2541
2542 /* Returns size of skb allocated or < 0 on error.
2543  *
2544  * We only need to fill in the address because the other members
2545  * of the RX descriptor are invariant, see tg3_init_rings.
2546  *
2547  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2548  * posting buffers we only dirty the first cache line of the RX
2549  * descriptor (containing the address).  Whereas for the RX status
2550  * buffers the cpu only reads the last cacheline of the RX descriptor
2551  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2552  */
2553 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2554                             int src_idx, u32 dest_idx_unmasked)
2555 {
2556         struct tg3_rx_buffer_desc *desc;
2557         struct ring_info *map, *src_map;
2558         struct sk_buff *skb;
2559         dma_addr_t mapping;
2560         int skb_size, dest_idx;
2561
2562         src_map = NULL;
2563         switch (opaque_key) {
2564         case RXD_OPAQUE_RING_STD:
2565                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2566                 desc = &tp->rx_std[dest_idx];
2567                 map = &tp->rx_std_buffers[dest_idx];
2568                 if (src_idx >= 0)
2569                         src_map = &tp->rx_std_buffers[src_idx];
2570                 skb_size = RX_PKT_BUF_SZ;
2571                 break;
2572
2573         case RXD_OPAQUE_RING_JUMBO:
2574                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2575                 desc = &tp->rx_jumbo[dest_idx];
2576                 map = &tp->rx_jumbo_buffers[dest_idx];
2577                 if (src_idx >= 0)
2578                         src_map = &tp->rx_jumbo_buffers[src_idx];
2579                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2580                 break;
2581
2582         default:
2583                 return -EINVAL;
2584         }
2585
2586         /* Do not overwrite any of the map or rp information
2587          * until we are sure we can commit to a new buffer.
2588          *
2589          * Callers depend upon this behavior and assume that
2590          * we leave everything unchanged if we fail.
2591          */
2592         skb = dev_alloc_skb(skb_size);
2593         if (skb == NULL)
2594                 return -ENOMEM;
2595
2596         skb->dev = tp->dev;
2597         skb_reserve(skb, tp->rx_offset);
2598
2599         mapping = pci_map_single(tp->pdev, skb->data,
2600                                  skb_size - tp->rx_offset,
2601                                  PCI_DMA_FROMDEVICE);
2602
2603         map->skb = skb;
2604         pci_unmap_addr_set(map, mapping, mapping);
2605
2606         if (src_map != NULL)
2607                 src_map->skb = NULL;
2608
2609         desc->addr_hi = ((u64)mapping >> 32);
2610         desc->addr_lo = ((u64)mapping & 0xffffffff);
2611
2612         return skb_size;
2613 }
2614
2615 /* We only need to move over in the address because the other
2616  * members of the RX descriptor are invariant.  See notes above
2617  * tg3_alloc_rx_skb for full details.
2618  */
2619 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2620                            int src_idx, u32 dest_idx_unmasked)
2621 {
2622         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2623         struct ring_info *src_map, *dest_map;
2624         int dest_idx;
2625
2626         switch (opaque_key) {
2627         case RXD_OPAQUE_RING_STD:
2628                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2629                 dest_desc = &tp->rx_std[dest_idx];
2630                 dest_map = &tp->rx_std_buffers[dest_idx];
2631                 src_desc = &tp->rx_std[src_idx];
2632                 src_map = &tp->rx_std_buffers[src_idx];
2633                 break;
2634
2635         case RXD_OPAQUE_RING_JUMBO:
2636                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2637                 dest_desc = &tp->rx_jumbo[dest_idx];
2638                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2639                 src_desc = &tp->rx_jumbo[src_idx];
2640                 src_map = &tp->rx_jumbo_buffers[src_idx];
2641                 break;
2642
2643         default:
2644                 return;
2645         }
2646
2647         dest_map->skb = src_map->skb;
2648         pci_unmap_addr_set(dest_map, mapping,
2649                            pci_unmap_addr(src_map, mapping));
2650         dest_desc->addr_hi = src_desc->addr_hi;
2651         dest_desc->addr_lo = src_desc->addr_lo;
2652
2653         src_map->skb = NULL;
2654 }
2655
2656 #if TG3_VLAN_TAG_USED
2657 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2658 {
2659         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2660 }
2661 #endif
2662
2663 /* The RX ring scheme is composed of multiple rings which post fresh
2664  * buffers to the chip, and one special ring the chip uses to report
2665  * status back to the host.
2666  *
2667  * The special ring reports the status of received packets to the
2668  * host.  The chip does not write into the original descriptor the
2669  * RX buffer was obtained from.  The chip simply takes the original
2670  * descriptor as provided by the host, updates the status and length
2671  * field, then writes this into the next status ring entry.
2672  *
2673  * Each ring the host uses to post buffers to the chip is described
2674  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2675  * it is first placed into the on-chip ram.  When the packet's length
2676  * is known, it walks down the TG3_BDINFO entries to select the ring.
2677  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2678  * whose MAXLEN covers the new packet's length is chosen.
2679  *
2680  * The "separate ring for rx status" scheme may sound queer, but it makes
2681  * sense from a cache coherency perspective.  If only the host writes
2682  * to the buffer post rings, and only the chip writes to the rx status
2683  * rings, then cache lines never move beyond shared-modified state.
2684  * If both the host and chip were to write into the same ring, cache line
2685  * eviction could occur since both entities want it in an exclusive state.
2686  */
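/* Receive up to "budget" packets from the status ring.  Frames at or
 * below RX_COPY_THRESHOLD (and all frames when rx_offset != 2, i.e. a
 * 5701 in PCI-X mode) are copied into a fresh skb so the ring buffer
 * can be recycled; larger frames get a replacement buffer allocated
 * and are handed up in place.  The producer mailboxes are updated once
 * at the end.
 */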
2687 static int tg3_rx(struct tg3 *tp, int budget)
2688 {
2689         u32 work_mask;
2690         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2691         u16 hw_idx, sw_idx;
2692         int received;
2693
2694         hw_idx = tp->hw_status->idx[0].rx_producer;
2695         /*
2696          * We need to order the read of hw_idx and the read of
2697          * the opaque cookie.
2698          */
2699         rmb();
2700         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2701         work_mask = 0;
2702         received = 0;
2703         while (sw_idx != hw_idx && budget > 0) {
2704                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2705                 unsigned int len;
2706                 struct sk_buff *skb;
2707                 dma_addr_t dma_addr;
2708                 u32 opaque_key, desc_idx, *post_ptr;
2709
2710                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2711                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2712                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2713                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2714                                                   mapping);
2715                         skb = tp->rx_std_buffers[desc_idx].skb;
2716                         post_ptr = &tp->rx_std_ptr;
2717                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2718                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2719                                                   mapping);
2720                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2721                         post_ptr = &tp->rx_jumbo_ptr;
2722                 }
2723                 else {
2724                         goto next_pkt_nopost;
2725                 }
2726
2727                 work_mask |= opaque_key;
2728
2729                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2730                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2731                 drop_it:
2732                         tg3_recycle_rx(tp, opaque_key,
2733                                        desc_idx, *post_ptr);
2734                 drop_it_no_recycle:
2735                         /* Other statistics are kept track of by the card. */
2736                         tp->net_stats.rx_dropped++;
2737                         goto next_pkt;
2738                 }
2739
2740                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2741
2742                 if (len > RX_COPY_THRESHOLD 
2743                         && tp->rx_offset == 2
2744                         /* rx_offset != 2 iff this is a 5701 card running
2745                          * in PCI-X mode [see tg3_get_invariants()] */
2746                 ) {
2747                         int skb_size;
2748
2749                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2750                                                     desc_idx, *post_ptr);
2751                         if (skb_size < 0)
2752                                 goto drop_it;
2753
2754                         pci_unmap_single(tp->pdev, dma_addr,
2755                                          skb_size - tp->rx_offset,
2756                                          PCI_DMA_FROMDEVICE);
2757
2758                         skb_put(skb, len);
2759                 } else {
2760                         struct sk_buff *copy_skb;
2761
2762                         tg3_recycle_rx(tp, opaque_key,
2763                                        desc_idx, *post_ptr);
2764
2765                         copy_skb = dev_alloc_skb(len + 2);
2766                         if (copy_skb == NULL)
2767                                 goto drop_it_no_recycle;
2768
2769                         copy_skb->dev = tp->dev;
2770                         skb_reserve(copy_skb, 2);
2771                         skb_put(copy_skb, len);
2772                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2773                         memcpy(copy_skb->data, skb->data, len);
2774                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2775
2776                         /* We'll reuse the original ring buffer. */
2777                         skb = copy_skb;
2778                 }
2779
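                /* The ip_tcp_csum field carries the 16-bit ones-complement
                 * sum the chip computed over the TCP/UDP segment; an intact
                 * segment (checksum field included) sums to 0xffff, so only
                 * that value lets us mark the skb CHECKSUM_UNNECESSARY.
                 */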
2780                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2781                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2782                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2783                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2784                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2785                 else
2786                         skb->ip_summed = CHECKSUM_NONE;
2787
2788                 skb->protocol = eth_type_trans(skb, tp->dev);
2789 #if TG3_VLAN_TAG_USED
2790                 if (tp->vlgrp != NULL &&
2791                     desc->type_flags & RXD_FLAG_VLAN) {
2792                         tg3_vlan_rx(tp, skb,
2793                                     desc->err_vlan & RXD_VLAN_MASK);
2794                 } else
2795 #endif
2796                         netif_receive_skb(skb);
2797
2798                 tp->dev->last_rx = jiffies;
2799                 received++;
2800                 budget--;
2801
2802 next_pkt:
2803                 (*post_ptr)++;
2804 next_pkt_nopost:
2805                 rx_rcb_ptr++;
2806                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2807         }
2808
2809         /* ACK the status ring. */
2810         tp->rx_rcb_ptr = rx_rcb_ptr;
2811         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2812                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2813
2814         /* Refill RX ring(s). */
2815         if (work_mask & RXD_OPAQUE_RING_STD) {
2816                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2817                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2818                              sw_idx);
2819         }
2820         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2821                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2822                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2823                              sw_idx);
2824         }
2825         mmiowb();
2826
2827         return received;
2828 }
2829
2830 static int tg3_poll(struct net_device *netdev, int *budget)
2831 {
2832         struct tg3 *tp = netdev_priv(netdev);
2833         struct tg3_hw_status *sblk = tp->hw_status;
2834         unsigned long flags;
2835         int done;
2836
2837         spin_lock_irqsave(&tp->lock, flags);
2838
2839         /* handle link change and other phy events */
2840         if (!(tp->tg3_flags &
2841               (TG3_FLAG_USE_LINKCHG_REG |
2842                TG3_FLAG_POLL_SERDES))) {
2843                 if (sblk->status & SD_STATUS_LINK_CHG) {
2844                         sblk->status = SD_STATUS_UPDATED |
2845                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2846                         tg3_setup_phy(tp, 0);
2847                 }
2848         }
2849
2850         /* run TX completion thread */
2851         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2852                 spin_lock(&tp->tx_lock);
2853                 tg3_tx(tp);
2854                 spin_unlock(&tp->tx_lock);
2855         }
2856
2857         spin_unlock_irqrestore(&tp->lock, flags);
2858
2859         /* run RX thread, within the bounds set by NAPI.
2860          * All RX "locking" is done by ensuring outside
2861          * code synchronizes with dev->poll()
2862          */
2863         done = 1;
2864         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2865                 int orig_budget = *budget;
2866                 int work_done;
2867
2868                 if (orig_budget > netdev->quota)
2869                         orig_budget = netdev->quota;
2870
2871                 work_done = tg3_rx(tp, orig_budget);
2872
2873                 *budget -= work_done;
2874                 netdev->quota -= work_done;
2875
2876                 if (work_done >= orig_budget)
2877                         done = 0;
2878         }
2879
2880         /* if no more work, tell net stack and NIC we're done */
2881         if (done) {
2882                 spin_lock_irqsave(&tp->lock, flags);
2883                 __netif_rx_complete(netdev);
2884                 tg3_restart_ints(tp);
2885                 spin_unlock_irqrestore(&tp->lock, flags);
2886         }
2887
2888         return (done ? 0 : 1);
2889 }
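/* Note on the (pre-napi_struct) polling contract used above: dev->poll()
 * may consume at most min(*budget, dev->quota) packets, must subtract the
 * work actually done from both counters, and returns 0 only after calling
 * __netif_rx_complete() so the device is taken back off the poll list and
 * its interrupts re-enabled.
 */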
2890
2891 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2892 {
2893         struct tg3_hw_status *sblk = tp->hw_status;
2894         unsigned int work_exists = 0;
2895
2896         /* check for phy events */
2897         if (!(tp->tg3_flags &
2898               (TG3_FLAG_USE_LINKCHG_REG |
2899                TG3_FLAG_POLL_SERDES))) {
2900                 if (sblk->status & SD_STATUS_LINK_CHG)
2901                         work_exists = 1;
2902         }
2903         /* check for RX/TX work to do */
2904         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2905             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2906                 work_exists = 1;
2907
2908         return work_exists;
2909 }
2910
2911 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2912 {
2913         struct net_device *dev = dev_id;
2914         struct tg3 *tp = netdev_priv(dev);
2915         struct tg3_hw_status *sblk = tp->hw_status;
2916         unsigned long flags;
2917         unsigned int handled = 1;
2918
2919         spin_lock_irqsave(&tp->lock, flags);
2920
2921         /* In INTx mode, it is possible for the interrupt to arrive at the
2922          * CPU before the status block posted prior to the interrupt is
2923          * visible in host memory.  Reading the PCI State register will
2924          * confirm whether the interrupt is ours and will flush the status block.
2925          */
2926         if ((sblk->status & SD_STATUS_UPDATED) ||
2927             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2928                 /*
2929                  * writing any value to intr-mbox-0 clears PCI INTA# and
2930                  * chip-internal interrupt pending events.
2931                  * writing non-zero to intr-mbox-0 additionally tells the
2932                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2933                  * event coalescing.
2934                  */
2935                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2936                              0x00000001);
2937                 /*
2938                  * Flush PCI write.  This also guarantees that our
2939                  * status block has been flushed to host memory.
2940                  */
2941                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2942                 sblk->status &= ~SD_STATUS_UPDATED;
2943
2944                 if (likely(tg3_has_work(dev, tp)))
2945                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2946                 else {
2947                         /* no work, shared interrupt perhaps?  re-enable
2948                          * interrupts, and flush that PCI write
2949                          */
2950                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2951                                 0x00000000);
2952                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2953                 }
2954         } else {        /* shared interrupt */
2955                 handled = 0;
2956         }
2957
2958         spin_unlock_irqrestore(&tp->lock, flags);
2959
2960         return IRQ_RETVAL(handled);
2961 }
2962
2963 static int tg3_init_hw(struct tg3 *);
2964 static int tg3_halt(struct tg3 *);
2965
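/* When CONFIG_NET_POLL_CONTROLLER is enabled, netpoll users such as
 * netconsole poll the chip by calling the interrupt handler directly,
 * since they may run with interrupts disabled.
 */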
2966 #ifdef CONFIG_NET_POLL_CONTROLLER
2967 static void tg3_poll_controller(struct net_device *dev)
2968 {
2969         tg3_interrupt(dev->irq, dev, NULL);
2970 }
2971 #endif
2972
2973 static void tg3_reset_task(void *_data)
2974 {
2975         struct tg3 *tp = _data;
2976         unsigned int restart_timer;
2977
2978         tg3_netif_stop(tp);
2979
2980         spin_lock_irq(&tp->lock);
2981         spin_lock(&tp->tx_lock);
2982
2983         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2984         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2985
2986         tg3_halt(tp);
2987         tg3_init_hw(tp);
2988
2989         tg3_netif_start(tp);
2990
2991         spin_unlock(&tp->tx_lock);
2992         spin_unlock_irq(&tp->lock);
2993
2994         if (restart_timer)
2995                 mod_timer(&tp->timer, jiffies + 1);
2996 }
2997
2998 static void tg3_tx_timeout(struct net_device *dev)
2999 {
3000         struct tg3 *tp = netdev_priv(dev);
3001
3002         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3003                dev->name);
3004
3005         schedule_work(&tp->reset_task);
3006 }
3007
3008 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3009
3010 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3011                                        u32 guilty_entry, int guilty_len,
3012                                        u32 last_plus_one, u32 *start, u32 mss)
3013 {
3014         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3015         dma_addr_t new_addr;
3016         u32 entry = *start;
3017         int i;
3018
3019         if (!new_skb) {
3020                 dev_kfree_skb(skb);
3021                 return -1;
3022         }
3023
3024         /* New SKB is guaranteed to be linear. */
3025         entry = *start;
3026         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3027                                   PCI_DMA_TODEVICE);
3028         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3029                     (skb->ip_summed == CHECKSUM_HW) ?
3030                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3031         *start = NEXT_TX(entry);
3032
3033         /* Now clean up the sw ring entries. */
3034         i = 0;
3035         while (entry != last_plus_one) {
3036                 int len;
3037
3038                 if (i == 0)
3039                         len = skb_headlen(skb);
3040                 else
3041                         len = skb_shinfo(skb)->frags[i-1].size;
3042                 pci_unmap_single(tp->pdev,
3043                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3044                                  len, PCI_DMA_TODEVICE);
3045                 if (i == 0) {
3046                         tp->tx_buffers[entry].skb = new_skb;
3047                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3048                 } else {
3049                         tp->tx_buffers[entry].skb = NULL;
3050                 }
3051                 entry = NEXT_TX(entry);
3052                 i++;
3053         }
3054
3055         dev_kfree_skb(skb);
3056
3057         return 0;
3058 }
3059
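/* Fill in one transmit descriptor.  mss_and_is_end packs two values:
 * bit 0 is set when this is the final descriptor of the packet (so
 * TXD_FLAG_END gets set), and bits 1 and up carry the TSO MSS, i.e.
 * callers pass (is_last | (mss << 1)).
 */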
3060 static void tg3_set_txd(struct tg3 *tp, int entry,
3061                         dma_addr_t mapping, int len, u32 flags,
3062                         u32 mss_and_is_end)
3063 {
3064         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3065         int is_end = (mss_and_is_end & 0x1);
3066         u32 mss = (mss_and_is_end >> 1);
3067         u32 vlan_tag = 0;
3068
3069         if (is_end)
3070                 flags |= TXD_FLAG_END;
3071         if (flags & TXD_FLAG_VLAN) {
3072                 vlan_tag = flags >> 16;
3073                 flags &= 0xffff;
3074         }
3075         vlan_tag |= (mss << TXD_MSS_SHIFT);
3076
3077         txd->addr_hi = ((u64) mapping >> 32);
3078         txd->addr_lo = ((u64) mapping & 0xffffffff);
3079         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3080         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3081 }
3082
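/* Some Tigon3 revisions mishandle a TX buffer whose DMA mapping straddles a
 * 4GB boundary (see tigon3_4gb_hwbug_workaround()).  Worked example with a
 * made-up mapping: base = 0xffffe000, len = 0x3000 gives
 * base + len + 8 = 0x1008 after 32-bit wraparound, which is < base, so the
 * test fires and the skb is re-copied into a safe buffer.
 */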
3083 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3084 {
3085         u32 base = (u32) mapping & 0xffffffff;
3086
3087         return ((base > 0xffffdcc0) &&
3088                 (base + len + 8 < base));
3089 }
3090
3091 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3092 {
3093         struct tg3 *tp = netdev_priv(dev);
3094         dma_addr_t mapping;
3095         unsigned int i;
3096         u32 len, entry, base_flags, mss;
3097         int would_hit_hwbug;
3098         unsigned long flags;
3099
3100         len = skb_headlen(skb);
3101
3102         /* No BH disabling for tx_lock here.  We are running in BH disabled
3103          * context and TX reclaim runs via tp->poll inside of a software
3104          * interrupt.  Rejoice!
3105          *
3106          * Actually, things are not so simple.  If we are to take a hw
3107          * IRQ here, we can deadlock, consider:
3108          *
3109          *       CPU1           CPU2
3110          *   tg3_start_xmit
3111          *   take tp->tx_lock
3112          *                      tg3_timer
3113          *                      take tp->lock
3114          *   tg3_interrupt
3115          *   spin on tp->lock
3116          *                      spin on tp->tx_lock
3117          *
3118          * So we really do need to disable interrupts when taking
3119          * tx_lock here.
3120          */
3121         local_irq_save(flags);
3122         if (!spin_trylock(&tp->tx_lock)) {
3123                 local_irq_restore(flags);
3124                 return NETDEV_TX_LOCKED;
3125         }
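        /* NETDEV_TX_LOCKED tells the queueing layer that the driver's own
         * tx lock was busy; the core is expected to requeue the skb and
         * retry shortly.
         */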
3126
3127         /* This is a hard error, log it. */
3128         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3129                 netif_stop_queue(dev);
3130                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3131                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3132                        dev->name);
3133                 return NETDEV_TX_BUSY;
3134         }
3135
3136         entry = tp->tx_prod;
3137         base_flags = 0;
3138         if (skb->ip_summed == CHECKSUM_HW)
3139                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3140 #if TG3_TSO_SUPPORT != 0
3141         mss = 0;
3142         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3143             (mss = skb_shinfo(skb)->tso_size) != 0) {
3144                 int tcp_opt_len, ip_tcp_len;
3145
3146                 if (skb_header_cloned(skb) &&
3147                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3148                         dev_kfree_skb(skb);
3149                         goto out_unlock;
3150                 }
3151
3152                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3153                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3154
3155                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3156                                TXD_FLAG_CPU_POST_DMA);
3157
3158                 skb->nh.iph->check = 0;
3159                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3160                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3161                         skb->h.th->check = 0;
3162                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3163                 }
3164                 else {
3165                         skb->h.th->check =
3166                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3167                                                    skb->nh.iph->daddr,
3168                                                    0, IPPROTO_TCP, 0);
3169                 }
3170
3171                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3172                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3173                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3174                                 int tsflags;
3175
3176                                 tsflags = ((skb->nh.iph->ihl - 5) +
3177                                            (tcp_opt_len >> 2));
3178                                 mss |= (tsflags << 11);
3179                         }
3180                 } else {
3181                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3182                                 int tsflags;
3183
3184                                 tsflags = ((skb->nh.iph->ihl - 5) +
3185                                            (tcp_opt_len >> 2));
3186                                 base_flags |= tsflags << 12;
3187                         }
3188                 }
3189         }
3190 #else
3191         mss = 0;
3192 #endif
3193 #if TG3_VLAN_TAG_USED
3194         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3195                 base_flags |= (TXD_FLAG_VLAN |
3196                                (vlan_tx_tag_get(skb) << 16));
3197 #endif
3198
3199         /* Queue skb data, a.k.a. the main skb fragment. */
3200         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3201
3202         tp->tx_buffers[entry].skb = skb;
3203         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3204
3205         would_hit_hwbug = 0;
3206
3207         if (tg3_4g_overflow_test(mapping, len))
3208                 would_hit_hwbug = entry + 1;
3209
3210         tg3_set_txd(tp, entry, mapping, len, base_flags,
3211                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3212
3213         entry = NEXT_TX(entry);
3214
3215         /* Now loop through additional data fragments, and queue them. */
3216         if (skb_shinfo(skb)->nr_frags > 0) {
3217                 unsigned int i, last;
3218
3219                 last = skb_shinfo(skb)->nr_frags - 1;
3220                 for (i = 0; i <= last; i++) {
3221                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3222
3223                         len = frag->size;
3224                         mapping = pci_map_page(tp->pdev,
3225                                                frag->page,
3226                                                frag->page_offset,
3227                                                len, PCI_DMA_TODEVICE);
3228
3229                         tp->tx_buffers[entry].skb = NULL;
3230                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3231
3232                         if (tg3_4g_overflow_test(mapping, len)) {
3233                                 /* Only one should match. */
3234                                 if (would_hit_hwbug)
3235                                         BUG();
3236                                 would_hit_hwbug = entry + 1;
3237                         }
3238
3239                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3240                                 tg3_set_txd(tp, entry, mapping, len,
3241                                             base_flags, (i == last)|(mss << 1));
3242                         else
3243                                 tg3_set_txd(tp, entry, mapping, len,
3244                                             base_flags, (i == last));
3245
3246                         entry = NEXT_TX(entry);
3247                 }
3248         }
3249
3250         if (would_hit_hwbug) {
3251                 u32 last_plus_one = entry;
3252                 u32 start;
3253                 unsigned int len = 0;
3254
3255                 would_hit_hwbug -= 1;
3256                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3257                 entry &= (TG3_TX_RING_SIZE - 1);
3258                 start = entry;
3259                 i = 0;
3260                 while (entry != last_plus_one) {
3261                         if (i == 0)
3262                                 len = skb_headlen(skb);
3263                         else
3264                                 len = skb_shinfo(skb)->frags[i-1].size;
3265
3266                         if (entry == would_hit_hwbug)
3267                                 break;
3268
3269                         i++;
3270                         entry = NEXT_TX(entry);
3271
3272                 }
3273
3274                 /* If the workaround fails due to memory/mapping
3275                  * failure, silently drop this packet.
3276                  */
3277                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3278                                                 entry, len,
3279                                                 last_plus_one,
3280                                                 &start, mss))
3281                         goto out_unlock;
3282
3283                 entry = start;
3284         }
3285
3286         /* Packets are ready, update Tx producer idx local and on card. */
3287         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3288
3289         tp->tx_prod = entry;
3290         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3291                 netif_stop_queue(dev);
3292
3293 out_unlock:
3294         mmiowb();
3295         spin_unlock_irqrestore(&tp->tx_lock, flags);
3296
3297         dev->trans_start = jiffies;
3298
3299         return NETDEV_TX_OK;
3300 }
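/* The queue is stopped above whenever MAX_SKB_FRAGS + 1 or fewer free
 * descriptors remain, i.e. when the next maximally-fragmented skb might not
 * fit; the tx completion path (tg3_tx()) is expected to wake it again once
 * descriptors have been reclaimed.
 */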
3301
3302 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3303                                int new_mtu)
3304 {
3305         dev->mtu = new_mtu;
3306
3307         if (new_mtu > ETH_DATA_LEN)
3308                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3309         else
3310                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3311 }
3312
3313 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3314 {
3315         struct tg3 *tp = netdev_priv(dev);
3316
3317         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3318                 return -EINVAL;
3319
3320         if (!netif_running(dev)) {
3321                 /* We'll just catch it later when the
3322                  * device is brought up.
3323                  */
3324                 tg3_set_mtu(dev, tp, new_mtu);
3325                 return 0;
3326         }
3327
3328         tg3_netif_stop(tp);
3329         spin_lock_irq(&tp->lock);
3330         spin_lock(&tp->tx_lock);
3331
3332         tg3_halt(tp);
3333
3334         tg3_set_mtu(dev, tp, new_mtu);
3335
3336         tg3_init_hw(tp);
3337
3338         tg3_netif_start(tp);
3339
3340         spin_unlock(&tp->tx_lock);
3341         spin_unlock_irq(&tp->lock);
3342
3343         return 0;
3344 }
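/* Changing the MTU of a running interface requires the full halt/re-init
 * above because crossing ETH_DATA_LEN toggles TG3_FLAG_JUMBO_ENABLE, which
 * in turn changes how the rx rings are populated in tg3_init_rings().
 */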
3345
3346 /* Free up pending packets in all rx/tx rings.
3347  *
3348  * The chip has been shut down and the driver detached from
3349  * the networking, so no interrupts or new tx packets will
3350  * end up in the driver.  tp->{tx,}lock is not held and we are not
3351  * in an interrupt context and thus may sleep.
3352  */
3353 static void tg3_free_rings(struct tg3 *tp)
3354 {
3355         struct ring_info *rxp;
3356         int i;
3357
3358         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3359                 rxp = &tp->rx_std_buffers[i];
3360
3361                 if (rxp->skb == NULL)
3362                         continue;
3363                 pci_unmap_single(tp->pdev,
3364                                  pci_unmap_addr(rxp, mapping),
3365                                  RX_PKT_BUF_SZ - tp->rx_offset,
3366                                  PCI_DMA_FROMDEVICE);
3367                 dev_kfree_skb_any(rxp->skb);
3368                 rxp->skb = NULL;
3369         }
3370
3371         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3372                 rxp = &tp->rx_jumbo_buffers[i];
3373
3374                 if (rxp->skb == NULL)
3375                         continue;
3376                 pci_unmap_single(tp->pdev,
3377                                  pci_unmap_addr(rxp, mapping),
3378                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3379                                  PCI_DMA_FROMDEVICE);
3380                 dev_kfree_skb_any(rxp->skb);
3381                 rxp->skb = NULL;
3382         }
3383
3384         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3385                 struct tx_ring_info *txp;
3386                 struct sk_buff *skb;
3387                 int j;
3388
3389                 txp = &tp->tx_buffers[i];
3390                 skb = txp->skb;
3391
3392                 if (skb == NULL) {
3393                         i++;
3394                         continue;
3395                 }
3396
3397                 pci_unmap_single(tp->pdev,
3398                                  pci_unmap_addr(txp, mapping),
3399                                  skb_headlen(skb),
3400                                  PCI_DMA_TODEVICE);
3401                 txp->skb = NULL;
3402
3403                 i++;
3404
3405                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3406                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3407                         pci_unmap_page(tp->pdev,
3408                                        pci_unmap_addr(txp, mapping),
3409                                        skb_shinfo(skb)->frags[j].size,
3410                                        PCI_DMA_TODEVICE);
3411                         i++;
3412                 }
3413
3414                 dev_kfree_skb_any(skb);
3415         }
3416 }
3417
3418 /* Initialize tx/rx rings for packet processing.
3419  *
3420  * The chip has been shut down and the driver detached from
3421  * the networking, so no interrupts or new tx packets will
3422  * end up in the driver.  tp->{tx,}lock are held and thus
3423  * we may not sleep.
3424  */
3425 static void tg3_init_rings(struct tg3 *tp)
3426 {
3427         u32 i;
3428
3429         /* Free up all the SKBs. */
3430         tg3_free_rings(tp);
3431
3432         /* Zero out all descriptors. */
3433         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3434         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3435         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3436         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3437
3438         /* Initialize invariants of the rings; we only set this
3439          * stuff once.  This works because the card does not
3440          * write into the rx buffer posting rings.
3441          */
3442         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3443                 struct tg3_rx_buffer_desc *rxd;
3444
3445                 rxd = &tp->rx_std[i];
3446                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3447                         << RXD_LEN_SHIFT;
3448                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3449                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3450                                (i << RXD_OPAQUE_INDEX_SHIFT));
3451         }
3452
3453         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3454                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3455                         struct tg3_rx_buffer_desc *rxd;
3456
3457                         rxd = &tp->rx_jumbo[i];
3458                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3459                                 << RXD_LEN_SHIFT;
3460                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3461                                 RXD_FLAG_JUMBO;
3462                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3463                                (i << RXD_OPAQUE_INDEX_SHIFT));
3464                 }
3465         }
3466
3467         /* Now allocate fresh SKBs for each rx ring. */
3468         for (i = 0; i < tp->rx_pending; i++) {
3469                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3470                                      -1, i) < 0)
3471                         break;
3472         }
3473
3474         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3475                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3476                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3477                                              -1, i) < 0)
3478                                 break;
3479                 }
3480         }
3481 }
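/* Only tp->rx_pending / tp->rx_jumbo_pending of the descriptor slots are
 * given SKBs here; the remaining slots of each producer ring are left
 * empty, so these two counters effectively control how deep the rx rings
 * are filled.
 */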
3482
3483 /*
3484  * Must not be invoked with interrupt sources disabled and
3485  * the hardware shutdown down.
3486  * the hardware shut down.
3487 static void tg3_free_consistent(struct tg3 *tp)
3488 {
3489         if (tp->rx_std_buffers) {
3490                 kfree(tp->rx_std_buffers);
3491                 tp->rx_std_buffers = NULL;
3492         }
3493         if (tp->rx_std) {
3494                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3495                                     tp->rx_std, tp->rx_std_mapping);
3496                 tp->rx_std = NULL;
3497         }
3498         if (tp->rx_jumbo) {
3499                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3500                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3501                 tp->rx_jumbo = NULL;
3502         }
3503         if (tp->rx_rcb) {
3504                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3505                                     tp->rx_rcb, tp->rx_rcb_mapping);
3506                 tp->rx_rcb = NULL;
3507         }
3508         if (tp->tx_ring) {
3509                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3510                         tp->tx_ring, tp->tx_desc_mapping);
3511                 tp->tx_ring = NULL;
3512         }
3513         if (tp->hw_status) {
3514                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3515                                     tp->hw_status, tp->status_mapping);
3516                 tp->hw_status = NULL;
3517         }
3518         if (tp->hw_stats) {
3519                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3520                                     tp->hw_stats, tp->stats_mapping);
3521                 tp->hw_stats = NULL;
3522         }
3523 }
3524
3525 /*
3526  * Must not be invoked with interrupt sources disabled and
3527  * the hardware shut down.  Can sleep.
3528  */
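/* All three software ring_info arrays live in one kmalloc() block:
 * rx_std_buffers is the start of the allocation and rx_jumbo_buffers /
 * tx_buffers are just offsets into it, which is why tg3_free_consistent()
 * only kfree()s rx_std_buffers.
 */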
3529 static int tg3_alloc_consistent(struct tg3 *tp)
3530 {
3531         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3532                                       (TG3_RX_RING_SIZE +
3533                                        TG3_RX_JUMBO_RING_SIZE)) +
3534                                      (sizeof(struct tx_ring_info) *
3535                                       TG3_TX_RING_SIZE),
3536                                      GFP_KERNEL);
3537         if (!tp->rx_std_buffers)
3538                 return -ENOMEM;
3539
3540         memset(tp->rx_std_buffers, 0,
3541                (sizeof(struct ring_info) *
3542                 (TG3_RX_RING_SIZE +
3543                  TG3_RX_JUMBO_RING_SIZE)) +
3544                (sizeof(struct tx_ring_info) *
3545                 TG3_TX_RING_SIZE));
3546
3547         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3548         tp->tx_buffers = (struct tx_ring_info *)
3549                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3550
3551         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3552                                           &tp->rx_std_mapping);
3553         if (!tp->rx_std)
3554                 goto err_out;
3555
3556         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3557                                             &tp->rx_jumbo_mapping);
3558
3559         if (!tp->rx_jumbo)
3560                 goto err_out;
3561
3562         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3563                                           &tp->rx_rcb_mapping);
3564         if (!tp->rx_rcb)
3565                 goto err_out;
3566
3567         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3568                                            &tp->tx_desc_mapping);
3569         if (!tp->tx_ring)
3570                 goto err_out;
3571
3572         tp->hw_status = pci_alloc_consistent(tp->pdev,
3573                                              TG3_HW_STATUS_SIZE,
3574                                              &tp->status_mapping);
3575         if (!tp->hw_status)
3576                 goto err_out;
3577
3578         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3579                                             sizeof(struct tg3_hw_stats),
3580                                             &tp->stats_mapping);
3581         if (!tp->hw_stats)
3582                 goto err_out;
3583
3584         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3585         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3586
3587         return 0;
3588
3589 err_out:
3590         tg3_free_consistent(tp);
3591         return -ENOMEM;
3592 }
3593
3594 #define MAX_WAIT_CNT 1000
3595
3596 /* To stop a block, clear the enable bit and poll till it
3597  * clears.  tp->lock is held.
3598  */
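/* Worst case this polls for MAX_WAIT_CNT * 100us = 100ms per block before
 * declaring the hardware wedged.
 */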
3599 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3600 {
3601         unsigned int i;
3602         u32 val;
3603
3604         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3605                 switch (ofs) {
3606                 case RCVLSC_MODE:
3607                 case DMAC_MODE:
3608                 case MBFREE_MODE:
3609                 case BUFMGR_MODE:
3610                 case MEMARB_MODE:
3611                         /* We can't enable/disable these bits of the
3612                          * 5705/5750, just say success.
3613                          */
3614                         return 0;
3615
3616                 default:
3617                         break;
3618                 };
3619         }
3620
3621         val = tr32(ofs);
3622         val &= ~enable_bit;
3623         tw32_f(ofs, val);
3624
3625         for (i = 0; i < MAX_WAIT_CNT; i++) {
3626                 udelay(100);
3627                 val = tr32(ofs);
3628                 if ((val & enable_bit) == 0)
3629                         break;
3630         }
3631
3632         if (i == MAX_WAIT_CNT) {
3633                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3634                        "ofs=%lx enable_bit=%x\n",
3635                        ofs, enable_bit);
3636                 return -ENODEV;
3637         }
3638
3639         return 0;
3640 }
3641
3642 /* tp->lock is held. */
3643 static int tg3_abort_hw(struct tg3 *tp)
3644 {
3645         int i, err;
3646
3647         tg3_disable_ints(tp);
3648
3649         tp->rx_mode &= ~RX_MODE_ENABLE;
3650         tw32_f(MAC_RX_MODE, tp->rx_mode);
3651         udelay(10);
3652
3653         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3654         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3655         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3656         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3657         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3658         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3659
3660         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3661         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3662         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3663         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3664         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3665         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3666         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3667         if (err)
3668                 goto out;
3669
3670         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3671         tw32_f(MAC_MODE, tp->mac_mode);
3672         udelay(40);
3673
3674         tp->tx_mode &= ~TX_MODE_ENABLE;
3675         tw32_f(MAC_TX_MODE, tp->tx_mode);
3676
3677         for (i = 0; i < MAX_WAIT_CNT; i++) {
3678                 udelay(100);
3679                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3680                         break;
3681         }
3682         if (i >= MAX_WAIT_CNT) {
3683                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3684                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3685                        tp->dev->name, tr32(MAC_TX_MODE));
3686                 return -ENODEV;
3687         }
3688
3689         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3690         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3691         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3692
3693         tw32(FTQ_RESET, 0xffffffff);
3694         tw32(FTQ_RESET, 0x00000000);
3695
3696         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3697         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3698         if (err)
3699                 goto out;
3700
3701         if (tp->hw_status)
3702                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3703         if (tp->hw_stats)
3704                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3705
3706 out:
3707         return err;
3708 }
3709
3710 /* tp->lock is held. */
3711 static int tg3_nvram_lock(struct tg3 *tp)
3712 {
3713         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3714                 int i;
3715
3716                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3717                 for (i = 0; i < 8000; i++) {
3718                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3719                                 break;
3720                         udelay(20);
3721                 }
3722                 if (i == 8000)
3723                         return -ENODEV;
3724         }
3725         return 0;
3726 }
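/* The SWARB register is a hardware semaphore arbitrating NVRAM access
 * between the driver and other on-chip agents; the loop above waits up to
 * 8000 * 20us = 160ms for the grant before giving up.
 */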
3727
3728 /* tp->lock is held. */
3729 static void tg3_nvram_unlock(struct tg3 *tp)
3730 {
3731         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3732                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3733 }
3734
3735 /* tp->lock is held. */
3736 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3737 {
3738         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3739                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3740                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3741
3742         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3743                 switch (kind) {
3744                 case RESET_KIND_INIT:
3745                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3746                                       DRV_STATE_START);
3747                         break;
3748
3749                 case RESET_KIND_SHUTDOWN:
3750                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3751                                       DRV_STATE_UNLOAD);
3752                         break;
3753
3754                 case RESET_KIND_SUSPEND:
3755                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3756                                       DRV_STATE_SUSPEND);
3757                         break;
3758
3759                 default:
3760                         break;
3761                 };
3762         }
3763 }
3764
3765 /* tp->lock is held. */
3766 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3767 {
3768         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3769                 switch (kind) {
3770                 case RESET_KIND_INIT:
3771                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3772                                       DRV_STATE_START_DONE);
3773                         break;
3774
3775                 case RESET_KIND_SHUTDOWN:
3776                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3777                                       DRV_STATE_UNLOAD_DONE);
3778                         break;
3779
3780                 default:
3781                         break;
3782                 };
3783         }
3784 }
3785
3786 /* tp->lock is held. */
3787 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3788 {
3789         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3790                 switch (kind) {
3791                 case RESET_KIND_INIT:
3792                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3793                                       DRV_STATE_START);
3794                         break;
3795
3796                 case RESET_KIND_SHUTDOWN:
3797                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3798                                       DRV_STATE_UNLOAD);
3799                         break;
3800
3801                 case RESET_KIND_SUSPEND:
3802                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3803                                       DRV_STATE_SUSPEND);
3804                         break;
3805
3806                 default:
3807                         break;
3808                 };
3809         }
3810 }
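/* The three tg3_write_sig_*() helpers above post DRV_STATE_* values into
 * NIC SRAM mailboxes so that the ASF/management firmware (when present)
 * knows whether the driver is starting, suspending or unloading across a
 * reset.
 */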
3811
3812 static void tg3_stop_fw(struct tg3 *);
3813
3814 /* tp->lock is held. */
3815 static int tg3_chip_reset(struct tg3 *tp)
3816 {
3817         u32 val;
3818         u32 flags_save;
3819         int i;
3820
3821         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3822                 tg3_nvram_lock(tp);
3823
3824         /*
3825          * We must avoid the readl() that normally takes place.
3826          * It locks machines, causes machine checks, and other
3827          * fun things.  So, temporarily disable the 5701
3828          * hardware workaround, while we do the reset.
3829          */
3830         flags_save = tp->tg3_flags;
3831         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3832
3833         /* do the reset */
3834         val = GRC_MISC_CFG_CORECLK_RESET;
3835
3836         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3837                 if (tr32(0x7e2c) == 0x60) {
3838                         tw32(0x7e2c, 0x20);
3839                 }
3840                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3841                         tw32(GRC_MISC_CFG, (1 << 29));
3842                         val |= (1 << 29);
3843                 }
3844         }
3845
3846         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3847                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3848         tw32(GRC_MISC_CFG, val);
3849
3850         /* restore 5701 hardware bug workaround flag */
3851         tp->tg3_flags = flags_save;
3852
3853         /* Unfortunately, we have to delay before the PCI read back.
3854          * Some 575X chips will not even respond to a PCI cfg access
3855          * when the reset command is given to the chip.
3856          *
3857          * How do these hardware designers expect things to work
3858          * properly if the PCI write is posted for a long period
3859          * of time?  It is always necessary to have some method by
3860          * which a register read back can occur to push out the
3861          * write that performs the reset.
3862          *
3863          * For most tg3 variants the trick below was working.
3864          * Ho hum...
3865          */
3866         udelay(120);
3867
3868         /* Flush PCI posted writes.  The normal MMIO registers
3869          * are inaccessible at this time so this is the only
3870          * way to do this reliably (actually, this is no longer
3871          * the case, see above).  I tried to use indirect
3872          * register read/write but this upset some 5701 variants.
3873          */
3874         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3875
3876         udelay(120);
3877
3878         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3879                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3880                         int i;
3881                         u32 cfg_val;
3882
3883                         /* Wait for link training to complete.  */
3884                         for (i = 0; i < 5000; i++)
3885                                 udelay(100);
3886
3887                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3888                         pci_write_config_dword(tp->pdev, 0xc4,
3889                                                cfg_val | (1 << 15));
3890                 }
3891                 /* Set PCIE max payload size and clear error status.  */
3892                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3893         }
3894
3895         /* Re-enable indirect register accesses. */
3896         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3897                                tp->misc_host_ctrl);
3898
3899         /* Set MAX PCI retry to zero. */
3900         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3901         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3902             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3903                 val |= PCISTATE_RETRY_SAME_DMA;
3904         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3905
3906         pci_restore_state(tp->pdev);
3907
3908         /* Make sure PCI-X relaxed ordering bit is clear. */
3909         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3910         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3911         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3912
3913         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3914
3915         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3916                 tg3_stop_fw(tp);
3917                 tw32(0x5000, 0x400);
3918         }
3919
3920         tw32(GRC_MODE, tp->grc_mode);
3921
3922         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3923                 u32 val = tr32(0xc4);
3924
3925                 tw32(0xc4, val | (1 << 15));
3926         }
3927
3928         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3929             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3930                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3931                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3932                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3933                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3934         }
3935
3936         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3937                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3938                 tw32_f(MAC_MODE, tp->mac_mode);
3939         } else
3940                 tw32_f(MAC_MODE, 0);
3941         udelay(40);
3942
3943         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3944                 /* Wait for firmware initialization to complete. */
3945                 for (i = 0; i < 100000; i++) {
3946                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3947                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3948                                 break;
3949                         udelay(10);
3950                 }
3951                 if (i >= 100000) {
3952                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3953                                "firmware will not restart magic=%08x\n",
3954                                tp->dev->name, val);
3955                         return -ENODEV;
3956                 }
3957         }
3958
3959         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3960             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3961                 u32 val = tr32(0x7c00);
3962
3963                 tw32(0x7c00, val | (1 << 25));
3964         }
3965
3966         /* Reprobe ASF enable state.  */
3967         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3968         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3969         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3970         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3971                 u32 nic_cfg;
3972
3973                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3974                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3975                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3976                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
3977                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
3978                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3979                 }
3980         }
3981
3982         return 0;
3983 }
3984
3985 /* tp->lock is held. */
3986 static void tg3_stop_fw(struct tg3 *tp)
3987 {
3988         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3989                 u32 val;
3990                 int i;
3991
3992                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3993                 val = tr32(GRC_RX_CPU_EVENT);
3994                 val |= (1 << 14);
3995                 tw32(GRC_RX_CPU_EVENT, val);
3996
3997                 /* Wait for RX cpu to ACK the event.  */
3998                 for (i = 0; i < 100; i++) {
3999                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4000                                 break;
4001                         udelay(1);
4002                 }
4003         }
4004 }
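/* tg3_stop_fw() implements a small handshake: FWCMD_NICDRV_PAUSE_FW is
 * written to the firmware command mailbox, the RX CPU is poked via bit 14
 * of GRC_RX_CPU_EVENT, and the driver then waits up to 100 * 1us for the
 * firmware to clear that bit as an ACK.
 */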
4005
4006 /* tp->lock is held. */
4007 static int tg3_halt(struct tg3 *tp)
4008 {
4009         int err;
4010
4011         tg3_stop_fw(tp);
4012
4013         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4014
4015         tg3_abort_hw(tp);
4016         err = tg3_chip_reset(tp);
4017
4018         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4019         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4020
4021         if (err)
4022                 return err;
4023
4024         return 0;
4025 }
4026
4027 #define TG3_FW_RELEASE_MAJOR    0x0
4028 #define TG3_FW_RELASE_MINOR     0x0
4029 #define TG3_FW_RELEASE_FIX      0x0
4030 #define TG3_FW_START_ADDR       0x08000000
4031 #define TG3_FW_TEXT_ADDR        0x08000000
4032 #define TG3_FW_TEXT_LEN         0x9c0
4033 #define TG3_FW_RODATA_ADDR      0x080009c0
4034 #define TG3_FW_RODATA_LEN       0x60
4035 #define TG3_FW_DATA_ADDR        0x08000a40
4036 #define TG3_FW_DATA_LEN         0x20
4037 #define TG3_FW_SBSS_ADDR        0x08000a60
4038 #define TG3_FW_SBSS_LEN         0xc
4039 #define TG3_FW_BSS_ADDR         0x08000a70
4040 #define TG3_FW_BSS_LEN          0x10
4041
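/* The TG3_FW_* constants describe the layout of the embedded firmware image
 * below (text/rodata/data/sbss/bss sections and where each is loaded in the
 * internal CPU's address space).  The words in tg3FwText[] appear to be
 * machine code for the chip's on-board CPU.
 */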
4042 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4043         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4044         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4045         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4046         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4047         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4048         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4049         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4050         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4051         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4052         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4053         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4054         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4055         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4056         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4057         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4058         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4059         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4060         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4061         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4062         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4063         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4064         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4065         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4066         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4067         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4068         0, 0, 0, 0, 0, 0,
4069         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4070         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4071         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4072         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4073         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4074         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4075         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4076         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4077         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4078         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4079         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4080         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4081         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4082         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4083         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4084         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4085         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4086         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4087         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4088         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4089         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4090         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4091         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4092         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4093         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4094         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4095         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4096         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4097         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4098         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4099         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4100         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4101         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4102         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4103         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4104         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4105         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4106         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4107         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4108         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4109         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4110         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4111         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4112         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4113         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4114         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4115         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4116         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4117         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4118         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4119         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4120         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4121         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4122         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4123         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4124         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4125         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4126         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4127         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4128         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4129         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4130         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4131         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4132         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4133         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4134 };
4135
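/* ASCII string constants referenced by the firmware text above
 * (event, error and CPU names).
 */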
4136 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4137         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4138         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4139         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4140         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4141         0x00000000
4142 };
4143
4144 #if 0 /* All zeros, don't eat up space with it. */
4145 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4146         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4147         0x00000000, 0x00000000, 0x00000000, 0x00000000
4148 };
4149 #endif
4150
4151 #define RX_CPU_SCRATCH_BASE     0x30000
4152 #define RX_CPU_SCRATCH_SIZE     0x04000
4153 #define TX_CPU_SCRATCH_BASE     0x34000
4154 #define TX_CPU_SCRATCH_SIZE     0x04000
4155
4156 /* tp->lock is held. */
4157 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4158 {
4159         int i;
4160
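        /* 5705-class and newer chips have no usable TX CPU here, so
         * asking to halt it indicates a driver bug.
         */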
4161         if (offset == TX_CPU_BASE &&
4162             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4163                 BUG();
4164
4165         if (offset == RX_CPU_BASE) {
4166                 for (i = 0; i < 10000; i++) {
4167                         tw32(offset + CPU_STATE, 0xffffffff);
4168                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4169                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4170                                 break;
4171                 }
4172
4173                 tw32(offset + CPU_STATE, 0xffffffff);
4174                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4175                 udelay(10);
4176         } else {
4177                 for (i = 0; i < 10000; i++) {
4178                         tw32(offset + CPU_STATE, 0xffffffff);
4179                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4180                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4181                                 break;
4182                 }
4183         }
4184
4185         if (i >= 10000) {
4186                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4187                        "%s CPU\n",
4188                        tp->dev->name,
4189                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4190                 return -ENODEV;
4191         }
4192         return 0;
4193 }
4194
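/* Layout of a firmware image to be loaded into an on-chip CPU: the link
 * address and length of each section, plus a pointer to its contents.
 * A NULL contents pointer means the section is all zeros.
 */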
4195 struct fw_info {
4196         unsigned int text_base;
4197         unsigned int text_len;
4198         u32 *text_data;
4199         unsigned int rodata_base;
4200         unsigned int rodata_len;
4201         u32 *rodata_data;
4202         unsigned int data_base;
4203         unsigned int data_len;
4204         u32 *data_data;
4205 };
4206
4207 /* tp->lock is held. */
4208 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4209                                  int cpu_scratch_size, struct fw_info *info)
4210 {
4211         int err, i;
4212         u32 orig_tg3_flags = tp->tg3_flags;
4213         void (*write_op)(struct tg3 *, u32, u32);
4214
4215         if (cpu_base == TX_CPU_BASE &&
4216             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4217                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4218                        "TX CPU firmware on %s, which is 5705 or newer.\n",
4219                        tp->dev->name);
4220                 return -EINVAL;
4221         }
4222
4223         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4224                 write_op = tg3_write_mem;
4225         else
4226                 write_op = tg3_write_indirect_reg32;
4227
4228         /* Force use of PCI config space for indirect register
4229          * write calls.
4230          */
4231         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4232
4233         err = tg3_halt_cpu(tp, cpu_base);
4234         if (err)
4235                 goto out;
4236
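        /* Zero the whole scratch area, then copy each firmware section
         * in word by word at the low 16 bits of its link address,
         * relative to the scratch base.  Missing sections are written
         * as zeros.
         */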
4237         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4238                 write_op(tp, cpu_scratch_base + i, 0);
4239         tw32(cpu_base + CPU_STATE, 0xffffffff);
4240         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
4241         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4242                 write_op(tp, (cpu_scratch_base +
4243                               (info->text_base & 0xffff) +
4244                               (i * sizeof(u32))),
4245                          (info->text_data ?
4246                           info->text_data[i] : 0));
4247         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4248                 write_op(tp, (cpu_scratch_base +
4249                               (info->rodata_base & 0xffff) +
4250                               (i * sizeof(u32))),
4251                          (info->rodata_data ?
4252                           info->rodata_data[i] : 0));
4253         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4254                 write_op(tp, (cpu_scratch_base +
4255                               (info->data_base & 0xffff) +
4256                               (i * sizeof(u32))),
4257                          (info->data_data ?
4258                           info->data_data[i] : 0));
4259
4260         err = 0;
4261
4262 out:
4263         tp->tg3_flags = orig_tg3_flags;
4264         return err;
4265 }
4266
4267 /* tp->lock is held. */
4268 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4269 {
4270         struct fw_info info;
4271         int err, i;
4272
4273         info.text_base = TG3_FW_TEXT_ADDR;
4274         info.text_len = TG3_FW_TEXT_LEN;
4275         info.text_data = &tg3FwText[0];
4276         info.rodata_base = TG3_FW_RODATA_ADDR;
4277         info.rodata_len = TG3_FW_RODATA_LEN;
4278         info.rodata_data = &tg3FwRodata[0];
4279         info.data_base = TG3_FW_DATA_ADDR;
4280         info.data_len = TG3_FW_DATA_LEN;
4281         info.data_data = NULL;
4282
4283         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4284                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4285                                     &info);
4286         if (err)
4287                 return err;
4288
4289         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4290                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4291                                     &info);
4292         if (err)
4293                 return err;
4294
4295         /* Now start up only the RX CPU. */
4296         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4297         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4298
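        /* Point the RX CPU at the firmware entry point.  Retry a few
         * times, re-asserting halt each time, until the PC reads back
         * correctly; the CPU is released from halt below.
         */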
4299         for (i = 0; i < 5; i++) {
4300                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4301                         break;
4302                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4303                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4304                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4305                 udelay(1000);
4306         }
4307         if (i >= 5) {
4308                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
4309                        "to set RX CPU PC: is %08x, should be %08x\n",
4310                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4311                        TG3_FW_TEXT_ADDR);
4312                 return -ENODEV;
4313         }
4314         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4315         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4316
4317         return 0;
4318 }
4319
4320 #if TG3_TSO_SUPPORT != 0
4321
4322 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4323 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4324 #define TG3_TSO_FW_RELEASE_FIX          0x0
4325 #define TG3_TSO_FW_START_ADDR           0x08000000
4326 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4327 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4328 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4329 #define TG3_TSO_FW_RODATA_LEN           0x60
4330 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4331 #define TG3_TSO_FW_DATA_LEN             0x30
4332 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4333 #define TG3_TSO_FW_SBSS_LEN             0x2c
4334 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4335 #define TG3_TSO_FW_BSS_LEN              0x894
4336
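/* MIPS instruction words of the TSO offload firmware; on chips without
 * hardware TSO, tg3_load_tso_firmware() below copies this image into
 * the TX CPU scratch area and starts the TX CPU on it.
 */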
4337 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4338         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4339         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4340         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4341         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4342         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4343         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4344         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4345         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4346         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4347         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4348         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4349         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4350         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4351         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4352         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4353         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4354         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4355         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4356         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4357         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4358         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4359         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4360         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4361         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4362         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4363         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4364         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4365         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4366         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4367         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4368         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4369         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4370         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4371         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4372         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4373         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4374         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4375         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4376         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4377         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4378         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4379         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4380         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4381         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4382         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4383         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4384         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4385         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4386         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4387         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4388         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4389         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4390         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4391         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4392         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4393         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4394         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4395         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4396         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4397         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4398         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4399         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4400         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4401         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4402         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4403         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4404         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4405         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4406         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4407         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4408         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4409         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4410         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4411         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4412         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4413         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4414         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4415         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4416         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4417         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4418         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4419         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4420         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4421         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4422         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4423         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4424         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4425         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4426         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4427         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4428         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4429         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4430         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4431         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4432         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4433         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4434         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4435         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4436         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4437         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4438         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4439         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4440         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4441         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4442         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4443         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4444         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4445         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4446         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4447         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4448         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4449         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4450         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4451         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4452         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4453         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4454         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4455         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4456         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4457         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4458         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4459         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4460         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4461         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4462         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4463         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4464         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4465         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4466         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4467         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4468         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4469         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4470         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4471         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4472         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4473         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4474         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4475         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4476         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4477         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4478         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4479         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4480         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4481         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4482         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4483         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4484         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4485         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4486         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4487         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4488         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4489         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4490         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4491         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4492         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4493         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4494         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4495         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4496         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4497         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4498         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4499         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4500         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4501         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4502         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4503         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4504         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4505         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4506         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4507         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4508         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4509         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4510         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4511         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4512         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4513         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4514         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4515         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4516         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4517         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4518         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4519         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4520         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4521         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4522         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4523         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4524         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4525         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4526         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4527         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4528         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4529         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4530         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4531         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4532         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4533         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4534         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4535         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4536         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4537         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4538         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4539         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4540         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4541         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4542         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4543         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4544         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4545         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4546         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4547         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4548         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4549         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4550         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4551         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4552         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4553         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4554         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4555         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4556         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4557         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4558         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4559         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4560         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4561         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4562         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4563         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4564         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4565         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4566         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4567         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4568         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4569         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4570         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4571         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4572         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4573         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4574         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4575         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4576         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4577         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4578         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4579         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4580         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4581         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4582         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4583         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4584         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4585         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4586         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4587         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4588         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4589         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4590         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4591         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4592         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4593         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4594         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4595         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4596         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4597         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4598         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4599         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4600         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4601         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4602         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4603         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4604         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4605         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4606         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4607         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4608         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4609         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4610         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4611         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4612         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4613         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4614         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4615         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4616         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4617         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4618         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4619         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4620         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4621         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4622 };
4623
4624 static u32 tg3TsoFwRodata[] = {
4625         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4626         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4627         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4628         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4629         0x00000000,
4630 };
4631
4632 static u32 tg3TsoFwData[] = {
4633         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4634         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4635         0x00000000,
4636 };
4637
4638 /* 5705 needs a special version of the TSO firmware.  */
4639 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4640 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4641 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4642 #define TG3_TSO5_FW_START_ADDR          0x00010000
4643 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4644 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4645 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4646 #define TG3_TSO5_FW_RODATA_LEN          0x50
4647 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4648 #define TG3_TSO5_FW_DATA_LEN            0x20
4649 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4650 #define TG3_TSO5_FW_SBSS_LEN            0x28
4651 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4652 #define TG3_TSO5_FW_BSS_LEN             0x88
4653
4654 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4655         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4656         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4657         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4658         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4659         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4660         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4661         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4662         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4663         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4664         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4665         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4666         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4667         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4668         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4669         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4670         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4671         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4672         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4673         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4674         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4675         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4676         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4677         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4678         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4679         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4680         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4681         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4682         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4683         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4684         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4685         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4686         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4687         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4688         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4689         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4690         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4691         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4692         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4693         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4694         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4695         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4696         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4697         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4698         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4699         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4700         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4701         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4702         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4703         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4704         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4705         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4706         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4707         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4708         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4709         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4710         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4711         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4712         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4713         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4714         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4715         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4716         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4717         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4718         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4719         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4720         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4721         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4722         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4723         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4724         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4725         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4726         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4727         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4728         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4729         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4730         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4731         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4732         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4733         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4734         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4735         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4736         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4737         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4738         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4739         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4740         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4741         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4742         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4743         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4744         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4745         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4746         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4747         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4748         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4749         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4750         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4751         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4752         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4753         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4754         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4755         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4756         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4757         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4758         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4759         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4760         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4761         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4762         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4763         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4764         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4765         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4766         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4767         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4768         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4769         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4770         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4771         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4772         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4773         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4774         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4775         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4776         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4777         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4778         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4779         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4780         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4781         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4782         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4783         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4784         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4785         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4786         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4787         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4788         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4789         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4790         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4791         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4792         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4793         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4794         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4795         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4796         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4797         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4798         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4799         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4800         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4801         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4802         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4803         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4804         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4805         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4806         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4807         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4808         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4809         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4810         0x00000000, 0x00000000, 0x00000000,
4811 };
4812
4813 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4814         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4815         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4816         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4817         0x00000000, 0x00000000, 0x00000000,
4818 };
4819
4820 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4821         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4822         0x00000000, 0x00000000, 0x00000000,
4823 };
4824
4825 /* tp->lock is held. */
4826 static int tg3_load_tso_firmware(struct tg3 *tp)
4827 {
4828         struct fw_info info;
4829         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4830         int err, i;
4831
4832         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4833                 return 0;
4834
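        /* The 5705 gets its own firmware image, loaded into the RX CPU
         * with the start of the on-chip MBUF pool used as scratch
         * space; tg3_reset_hw() later moves the MBUF pool past the
         * firmware footprint.  All other chips load the regular TSO
         * firmware into the TX CPU scratch area.
         */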
4835         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4836                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4837                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4838                 info.text_data = &tg3Tso5FwText[0];
4839                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4840                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4841                 info.rodata_data = &tg3Tso5FwRodata[0];
4842                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4843                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4844                 info.data_data = &tg3Tso5FwData[0];
4845                 cpu_base = RX_CPU_BASE;
4846                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4847                 cpu_scratch_size = (info.text_len +
4848                                     info.rodata_len +
4849                                     info.data_len +
4850                                     TG3_TSO5_FW_SBSS_LEN +
4851                                     TG3_TSO5_FW_BSS_LEN);
4852         } else {
4853                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4854                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4855                 info.text_data = &tg3TsoFwText[0];
4856                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4857                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4858                 info.rodata_data = &tg3TsoFwRodata[0];
4859                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4860                 info.data_len = TG3_TSO_FW_DATA_LEN;
4861                 info.data_data = &tg3TsoFwData[0];
4862                 cpu_base = TX_CPU_BASE;
4863                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4864                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4865         }
4866
4867         err = tg3_load_firmware_cpu(tp, cpu_base,
4868                                     cpu_scratch_base, cpu_scratch_size,
4869                                     &info);
4870         if (err)
4871                 return err;
4872
4873         /* Now start up the CPU. */
4874         tw32(cpu_base + CPU_STATE, 0xffffffff);
4875         tw32_f(cpu_base + CPU_PC,    info.text_base);
4876
4877         for (i = 0; i < 5; i++) {
4878                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4879                         break;
4880                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4881                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4882                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4883                 udelay(1000);
4884         }
4885         if (i >= 5) {
4886                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s "
4887                        "to set CPU PC: is %08x, should be %08x\n",
4888                        tp->dev->name, tr32(cpu_base + CPU_PC),
4889                        info.text_base);
4890                 return -ENODEV;
4891         }
4892         tw32(cpu_base + CPU_STATE, 0xffffffff);
4893         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4894         return 0;
4895 }
4896
4897 #endif /* TG3_TSO_SUPPORT != 0 */
4898
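/* Program the station address into all four MAC address slots (plus the
 * twelve extended slots on 5703/5704) and derive the TX backoff seed
 * from the sum of the address bytes.
 */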
4899 /* tp->lock is held. */
4900 static void __tg3_set_mac_addr(struct tg3 *tp)
4901 {
4902         u32 addr_high, addr_low;
4903         int i;
4904
4905         addr_high = ((tp->dev->dev_addr[0] << 8) |
4906                      tp->dev->dev_addr[1]);
4907         addr_low = ((tp->dev->dev_addr[2] << 24) |
4908                     (tp->dev->dev_addr[3] << 16) |
4909                     (tp->dev->dev_addr[4] <<  8) |
4910                     (tp->dev->dev_addr[5] <<  0));
4911         for (i = 0; i < 4; i++) {
4912                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4913                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4914         }
4915
4916         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4917             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4918                 for (i = 0; i < 12; i++) {
4919                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4920                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4921                 }
4922         }
4923
4924         addr_high = (tp->dev->dev_addr[0] +
4925                      tp->dev->dev_addr[1] +
4926                      tp->dev->dev_addr[2] +
4927                      tp->dev->dev_addr[3] +
4928                      tp->dev->dev_addr[4] +
4929                      tp->dev->dev_addr[5]) &
4930                 TX_BACKOFF_SEED_MASK;
4931         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4932 }
4933
4934 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4935 {
4936         struct tg3 *tp = netdev_priv(dev);
4937         struct sockaddr *addr = p;
4938
4939         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4940
4941         spin_lock_irq(&tp->lock);
4942         __tg3_set_mac_addr(tp);
4943         spin_unlock_irq(&tp->lock);
4944
4945         return 0;
4946 }
4947
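/* Write one ring control block into NIC SRAM: the 64-bit host DMA
 * address (high word first), the maxlen/flags word and, on pre-5705
 * chips, the NIC-local ring address.
 */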
4948 /* tp->lock is held. */
4949 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4950                            dma_addr_t mapping, u32 maxlen_flags,
4951                            u32 nic_addr)
4952 {
4953         tg3_write_mem(tp,
4954                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4955                       ((u64) mapping >> 32));
4956         tg3_write_mem(tp,
4957                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4958                       ((u64) mapping & 0xffffffff));
4959         tg3_write_mem(tp,
4960                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4961                        maxlen_flags);
4962
4963         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4964                 tg3_write_mem(tp,
4965                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4966                               nic_addr);
4967 }
4968
4969 static void __tg3_set_rx_mode(struct net_device *);
4970
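/* Bring the chip to a fully initialised state: stop any running
 * firmware and DMA, reset the chip, then reprogram the rings, buffer
 * manager and DMA engines from scratch.
 */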
4971 /* tp->lock is held. */
4972 static int tg3_reset_hw(struct tg3 *tp)
4973 {
4974         u32 val, rdmac_mode;
4975         int i, err, limit;
4976
4977         tg3_disable_ints(tp);
4978
4979         tg3_stop_fw(tp);
4980
4981         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4982
4983         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4984                 err = tg3_abort_hw(tp);
4985                 if (err)
4986                         return err;
4987         }
4988
4989         err = tg3_chip_reset(tp);
4990         if (err)
4991                 return err;
4992
4993         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4994
4995         /* This works around an issue with Athlon chipsets on
4996          * B3 tigon3 silicon.  This bit has no effect on any
4997          * other revision.  But do not set this on PCI Express
4998          * chips.
4999          */
5000         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5001                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5002         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5003
5004         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5005             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5006                 val = tr32(TG3PCI_PCISTATE);
5007                 val |= PCISTATE_RETRY_SAME_DMA;
5008                 tw32(TG3PCI_PCISTATE, val);
5009         }
5010
5011         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5012                 /* Enable some hw fixes.  */
5013                 val = tr32(TG3PCI_MSI_DATA);
5014                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5015                 tw32(TG3PCI_MSI_DATA, val);
5016         }
5017
5018         /* Descriptor ring init may access the NIC SRAM
5019          * area to set up the TX descriptors, so we
5020          * can only do this after the hardware has been
5021          * successfully reset.
5022          */
5023         tg3_init_rings(tp);
5024
5025         /* This value is determined during the probe time DMA
5026          * engine test, tg3_test_dma.
5027          */
5028         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5029
5030         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5031                           GRC_MODE_4X_NIC_SEND_RINGS |
5032                           GRC_MODE_NO_TX_PHDR_CSUM |
5033                           GRC_MODE_NO_RX_PHDR_CSUM);
5034         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5035         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5036                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5037         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5038                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5039
5040         tw32(GRC_MODE,
5041              tp->grc_mode |
5042              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5043
5044         /* Set up the timer prescaler register.  Clock is always 66MHz. */
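             /* A prescaler value of 65 presumably divides the 66MHz core
              * clock by 66, i.e. a 1us timer tick (assumption, the divisor
              * is not documented here).
              */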
5045         val = tr32(GRC_MISC_CFG);
5046         val &= ~0xff;
5047         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5048         tw32(GRC_MISC_CFG, val);
5049
5050         /* Initialize MBUF/DESC pool. */
5051         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
5052             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
5053                 /* Do nothing.  */
5054         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5055                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5056                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5057                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5058                 else
5059                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5060                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5061                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5062         }
5063 #if TG3_TSO_SUPPORT != 0
5064         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5065                 int fw_len;
5066
5067                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5068                           TG3_TSO5_FW_RODATA_LEN +
5069                           TG3_TSO5_FW_DATA_LEN +
5070                           TG3_TSO5_FW_SBSS_LEN +
5071                           TG3_TSO5_FW_BSS_LEN);
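                     /* Round the TSO firmware footprint up to a 128-byte
                      * boundary before carving it out of the 5705 mbuf pool.
                      */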
5072                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5073                 tw32(BUFMGR_MB_POOL_ADDR,
5074                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5075                 tw32(BUFMGR_MB_POOL_SIZE,
5076                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5077         }
5078 #endif
5079
5080         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5081                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5082                      tp->bufmgr_config.mbuf_read_dma_low_water);
5083                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5084                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5085                 tw32(BUFMGR_MB_HIGH_WATER,
5086                      tp->bufmgr_config.mbuf_high_water);
5087         } else {
5088                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5089                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5090                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5091                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5092                 tw32(BUFMGR_MB_HIGH_WATER,
5093                      tp->bufmgr_config.mbuf_high_water_jumbo);
5094         }
5095         tw32(BUFMGR_DMA_LOW_WATER,
5096              tp->bufmgr_config.dma_low_water);
5097         tw32(BUFMGR_DMA_HIGH_WATER,
5098              tp->bufmgr_config.dma_high_water);
5099
5100         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
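             /* Poll for up to ~20ms (2000 x 10us) for the buffer manager
              * to report itself enabled.
              */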
5101         for (i = 0; i < 2000; i++) {
5102                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5103                         break;
5104                 udelay(10);
5105         }
5106         if (i >= 2000) {
5107                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5108                        tp->dev->name);
5109                 return -ENODEV;
5110         }
5111
5112         /* Setup replenish threshold. */
5113         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5114
5115         /* Initialize TG3_BDINFO's at:
5116          *  RCVDBDI_STD_BD:     standard eth size rx ring
5117          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5118          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5119          *
5120          * like so:
5121          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5122          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5123          *                              ring attribute flags
5124          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5125          *
5126          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5127          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5128          *
5129          * The size of each ring is fixed in the firmware, but the location is
5130          * configurable.
5131          */
5132         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5133              ((u64) tp->rx_std_mapping >> 32));
5134         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5135              ((u64) tp->rx_std_mapping & 0xffffffff));
5136         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5137              NIC_SRAM_RX_BUFFER_DESC);
5138
5139         /* Don't even try to program the JUMBO/MINI buffer descriptor
5140          * configs on 5705 and newer chips.
5141          */
5142         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5143                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5144                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5145         } else {
5146                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5147                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5148
5149                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5150                      BDINFO_FLAGS_DISABLED);
5151
5152                 /* Setup replenish threshold. */
5153                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5154
5155                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5156                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5157                              ((u64) tp->rx_jumbo_mapping >> 32));
5158                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5159                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5160                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5161                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5162                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5163                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5164                 } else {
5165                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5166                              BDINFO_FLAGS_DISABLED);
5167                 }
5168
5169         }
5170
5171         /* There is only one send ring on 5705/5750, no need to explicitly
5172          * disable the others.
5173          */
5174         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5175                 /* Clear out send RCB ring in SRAM. */
5176                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5177                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5178                                       BDINFO_FLAGS_DISABLED);
5179         }
5180
5181         tp->tx_prod = 0;
5182         tp->tx_cons = 0;
5183         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5184         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5185
5186         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5187                        tp->tx_desc_mapping,
5188                        (TG3_TX_RING_SIZE <<
5189                         BDINFO_FLAGS_MAXLEN_SHIFT),
5190                        NIC_SRAM_TX_BUFFER_DESC);
5191
5192         /* There is only one receive return ring on 5705/5750, no need
5193          * to explicitly disable the others.
5194          */
5195         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5196                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5197                      i += TG3_BDINFO_SIZE) {
5198                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5199                                       BDINFO_FLAGS_DISABLED);
5200                 }
5201         }
5202
5203         tp->rx_rcb_ptr = 0;
5204         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5205
5206         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5207                        tp->rx_rcb_mapping,
5208                        (TG3_RX_RCB_RING_SIZE(tp) <<
5209                         BDINFO_FLAGS_MAXLEN_SHIFT),
5210                        0);
5211
5212         tp->rx_std_ptr = tp->rx_pending;
5213         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5214                      tp->rx_std_ptr);
5215
5216         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5217                                                 tp->rx_jumbo_pending : 0;
5218         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5219                      tp->rx_jumbo_ptr);
5220
5221         /* Initialize MAC address and backoff seed. */
5222         __tg3_set_mac_addr(tp);
5223
5224         /* MTU + ethernet header + FCS + optional VLAN tag */
5225         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5226
5227         /* The slot time is changed by tg3_setup_phy if we
5228          * run at gigabit with half duplex.
5229          */
5230         tw32(MAC_TX_LENGTHS,
5231              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5232              (6 << TX_LENGTHS_IPG_SHIFT) |
5233              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5234
5235         /* Receive rules. */
5236         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5237         tw32(RCVLPC_CONFIG, 0x0181);
5238
5239         /* Calculate RDMAC_MODE setting early; we need it to determine
5240          * the RCVLPC_STATS_ENABLE mask.
5241          */
5242         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5243                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5244                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5245                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5246                       RDMAC_MODE_LNGREAD_ENAB);
5247         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5248                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5249         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5250              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5251             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
5252              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)) {
5253                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5254                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5255                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5256                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5257                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5258                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5259                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5260                 }
5261         }
5262
5263 #if TG3_TSO_SUPPORT != 0
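             /* Bit 27 has no named constant in this version; it is
              * presumably the RDMAC hardware-TSO enable (assumption).
              */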
5264         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5265                 rdmac_mode |= (1 << 27);
5266 #endif
5267
5268         /* Receive/send statistics. */
5269         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5270             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5271                 val = tr32(RCVLPC_STATS_ENABLE);
5272                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5273                 tw32(RCVLPC_STATS_ENABLE, val);
5274         } else {
5275                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5276         }
5277         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5278         tw32(SNDDATAI_STATSENAB, 0xffffff);
5279         tw32(SNDDATAI_STATSCTRL,
5280              (SNDDATAI_SCTRL_ENABLE |
5281               SNDDATAI_SCTRL_FASTUPD));
5282
5283         /* Setup host coalescing engine. */
5284         tw32(HOSTCC_MODE, 0);
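             /* Wait up to ~20ms for the engine to report itself disabled
              * before reprogramming the coalescing parameters below.
              */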
5285         for (i = 0; i < 2000; i++) {
5286                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5287                         break;
5288                 udelay(10);
5289         }
5290
5291         tw32(HOSTCC_RXCOL_TICKS, 0);
5292         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5293         tw32(HOSTCC_RXMAX_FRAMES, 1);
5294         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5295         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5296                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5297                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5298         }
5299         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5300         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5301
5302         /* set status block DMA address */
5303         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5304              ((u64) tp->status_mapping >> 32));
5305         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5306              ((u64) tp->status_mapping & 0xffffffff));
5307
5308         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5309                 /* Status/statistics block address.  See tg3_timer,
5310                  * the tg3_periodic_fetch_stats call there, and
5311                  * tg3_get_stats to see how this works for 5705/5750 chips.
5312                  */
5313                 tw32(HOSTCC_STAT_COAL_TICKS,
5314                      DEFAULT_STAT_COAL_TICKS);
5315                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5316                      ((u64) tp->stats_mapping >> 32));
5317                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5318                      ((u64) tp->stats_mapping & 0xffffffff));
5319                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5320                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5321         }
5322
5323         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5324
5325         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5326         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5327         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5328                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5329
5330         /* Clear statistics/status block in chip, and status block in host RAM. */
5331         for (i = NIC_SRAM_STATS_BLK;
5332              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5333              i += sizeof(u32)) {
5334                 tg3_write_mem(tp, i, 0);
5335                 udelay(40);
5336         }
5337         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5338
5339         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5340                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5341         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5342         udelay(40);
5343
5344         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5345         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5346                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5347                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5348         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5349         udelay(100);
5350
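             /* Zero the interrupt mailbox and read it back; the read is
              * presumably there to flush the posted write to the chip.
              */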
5351         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5352         tr32(MAILBOX_INTERRUPT_0);
5353
5354         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5355                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5356                 udelay(40);
5357         }
5358
5359         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5360                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5361                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5362                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5363                WDMAC_MODE_LNGREAD_ENAB);
5364
5365         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5366              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5367             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
5368              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)) {
5369                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5370                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5371                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5372                         /* nothing */
5373                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5374                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5375                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5376                         val |= WDMAC_MODE_RX_ACCEL;
5377                 }
5378         }
5379
5380         tw32_f(WDMAC_MODE, val);
5381         udelay(40);
5382
5383         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5384                 val = tr32(TG3PCI_X_CAPS);
5385                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5386                         val &= ~PCIX_CAPS_BURST_MASK;
5387                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5388                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5389                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5390                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5391                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5392                                 val |= (tp->split_mode_max_reqs <<
5393                                         PCIX_CAPS_SPLIT_SHIFT);
5394                 }
5395                 tw32(TG3PCI_X_CAPS, val);
5396         }
5397
5398         tw32_f(RDMAC_MODE, rdmac_mode);
5399         udelay(40);
5400
5401         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5402         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5403                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5404         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5405         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5406         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5407         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5408         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5409 #if TG3_TSO_SUPPORT != 0
5410         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5411                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5412 #endif
5413         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5414         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5415
5416         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5417                 err = tg3_load_5701_a0_firmware_fix(tp);
5418                 if (err)
5419                         return err;
5420         }
5421
5422 #if TG3_TSO_SUPPORT != 0
5423         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5424                 err = tg3_load_tso_firmware(tp);
5425                 if (err)
5426                         return err;
5427         }
5428 #endif
5429
5430         tp->tx_mode = TX_MODE_ENABLE;
5431         tw32_f(MAC_TX_MODE, tp->tx_mode);
5432         udelay(100);
5433
5434         tp->rx_mode = RX_MODE_ENABLE;
5435         tw32_f(MAC_RX_MODE, tp->rx_mode);
5436         udelay(10);
5437
5438         if (tp->link_config.phy_is_low_power) {
5439                 tp->link_config.phy_is_low_power = 0;
5440                 tp->link_config.speed = tp->link_config.orig_speed;
5441                 tp->link_config.duplex = tp->link_config.orig_duplex;
5442                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5443         }
5444
5445         tp->mi_mode = MAC_MI_MODE_BASE;
5446         tw32_f(MAC_MI_MODE, tp->mi_mode);
5447         udelay(80);
5448
5449         tw32(MAC_LED_CTRL, tp->led_ctrl);
5450
5451         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5452         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5453                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5454                 udelay(10);
5455         }
5456         tw32_f(MAC_RX_MODE, tp->rx_mode);
5457         udelay(10);
5458
5459         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5460                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5461                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5462                         /* Set drive transmission level to 1.2V, but only
5463                          * if the signal pre-emphasis bit is not set.  */
5464                         val = tr32(MAC_SERDES_CFG);
5465                         val &= 0xfffff000;
5466                         val |= 0x880;
5467                         tw32(MAC_SERDES_CFG, val);
5468                 }
5469                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5470                         tw32(MAC_SERDES_CFG, 0x616000);
5471         }
5472
5473         /* Prevent chip from dropping frames when flow control
5474          * is enabled.
5475          */
5476         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5477
5478         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5479             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5480                 /* Use hardware link auto-negotiation */
5481                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5482         }
5483
5484         err = tg3_setup_phy(tp, 1);
5485         if (err)
5486                 return err;
5487
5488         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5489                 u32 tmp;
5490
5491                 /* Clear CRC stats. */
5492                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5493                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5494                         tg3_readphy(tp, 0x14, &tmp);
5495                 }
5496         }
5497
5498         __tg3_set_rx_mode(tp->dev);
5499
5500         /* Initialize receive rules. */
5501         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5502         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5503         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5504         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5505
5506         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5507                 limit = 8;
5508         else
5509                 limit = 16;
5510         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5511                 limit -= 4;
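             /* Disable every receive rule above the per-chip limit; the
              * cases below fall through intentionally.  With ASF enabled
              * the top four rules are presumably reserved for the ASF
              * firmware, hence the limit -= 4 above.
              */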
5512         switch (limit) {
5513         case 16:
5514                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5515         case 15:
5516                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5517         case 14:
5518                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5519         case 13:
5520                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5521         case 12:
5522                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5523         case 11:
5524                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5525         case 10:
5526                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5527         case 9:
5528                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5529         case 8:
5530                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5531         case 7:
5532                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5533         case 6:
5534                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5535         case 5:
5536                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5537         case 4:
5538                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5539         case 3:
5540                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5541         case 2:
5542         case 1:
5543
5544         default:
5545                 break;
5546         }
5547
5548         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5549
5550         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5551                 tg3_enable_ints(tp);
5552
5553         return 0;
5554 }
5555
5556 /* Called at device open time to get the chip ready for
5557  * packet processing.  Invoked with tp->lock held.
5558  */
5559 static int tg3_init_hw(struct tg3 *tp)
5560 {
5561         int err;
5562
5563         /* Force the chip into D0. */
5564         err = tg3_set_power_state(tp, 0);
5565         if (err)
5566                 goto out;
5567
5568         tg3_switch_clocks(tp);
5569
5570         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5571
5572         err = tg3_reset_hw(tp);
5573
5574 out:
5575         return err;
5576 }
5577
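     /* Fold a 32-bit hardware counter into a 64-bit software counter.
      * If the low word ends up smaller than the value just added, the
      * addition wrapped, so carry into the high word.
      */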
5578 #define TG3_STAT_ADD32(PSTAT, REG) \
5579 do {    u32 __val = tr32(REG); \
5580         (PSTAT)->low += __val; \
5581         if ((PSTAT)->low < __val) \
5582                 (PSTAT)->high += 1; \
5583 } while (0)
5584
5585 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5586 {
5587         struct tg3_hw_stats *sp = tp->hw_stats;
5588
5589         if (!netif_carrier_ok(tp->dev))
5590                 return;
5591
5592         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5593         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5594         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5595         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5596         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5597         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5598         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5599         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5600         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5601         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5602         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5603         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5604         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5605
5606         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5607         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5608         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5609         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5610         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5611         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5612         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5613         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5614         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5615         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5616         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5617         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5618         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5619         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5620 }
5621
5622 static void tg3_timer(unsigned long __opaque)
5623 {
5624         struct tg3 *tp = (struct tg3 *) __opaque;
5625         unsigned long flags;
5626
5627         spin_lock_irqsave(&tp->lock, flags);
5628         spin_lock(&tp->tx_lock);
5629
5630         /* All of this garbage exists because, when using non-tagged
5631          * IRQ status, the mailbox/status_block protocol the chip
5632          * uses with the CPU is race prone.
5633          */
5634         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5635                 tw32(GRC_LOCAL_CTRL,
5636                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5637         } else {
5638                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5639                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5640         }
5641
5642         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5643                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5644                 spin_unlock(&tp->tx_lock);
5645                 spin_unlock_irqrestore(&tp->lock, flags);
5646                 schedule_work(&tp->reset_task);
5647                 return;
5648         }
5649
5650         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5651                 tg3_periodic_fetch_stats(tp);
5652
5653         /* This part only runs once per second. */
5654         if (!--tp->timer_counter) {
5655                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5656                         u32 mac_stat;
5657                         int phy_event;
5658
5659                         mac_stat = tr32(MAC_STATUS);
5660
5661                         phy_event = 0;
5662                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5663                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5664                                         phy_event = 1;
5665                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5666                                 phy_event = 1;
5667
5668                         if (phy_event)
5669                                 tg3_setup_phy(tp, 0);
5670                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5671                         u32 mac_stat = tr32(MAC_STATUS);
5672                         int need_setup = 0;
5673
5674                         if (netif_carrier_ok(tp->dev) &&
5675                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5676                                 need_setup = 1;
5677                         }
5678                 if (!netif_carrier_ok(tp->dev) &&
5679                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5680                                          MAC_STATUS_SIGNAL_DET))) {
5681                                 need_setup = 1;
5682                         }
5683                         if (need_setup) {
5684                                 tw32_f(MAC_MODE,
5685                                      (tp->mac_mode &
5686                                       ~MAC_MODE_PORT_MODE_MASK));
5687                                 udelay(40);
5688                                 tw32_f(MAC_MODE, tp->mac_mode);
5689                                 udelay(40);
5690                                 tg3_setup_phy(tp, 0);
5691                         }
5692                 }
5693
5694                 tp->timer_counter = tp->timer_multiplier;
5695         }
5696
5697         /* Heartbeat is only sent once every 120 seconds.  */
5698         if (!--tp->asf_counter) {
5699                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5700                         u32 val;
5701
5702                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5703                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5704                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5705                         val = tr32(GRC_RX_CPU_EVENT);
5706                         val |= (1 << 14);
5707                         tw32(GRC_RX_CPU_EVENT, val);
5708                 }
5709                 tp->asf_counter = tp->asf_multiplier;
5710         }
5711
5712         spin_unlock(&tp->tx_lock);
5713         spin_unlock_irqrestore(&tp->lock, flags);
5714
5715         tp->timer.expires = jiffies + tp->timer_offset;
5716         add_timer(&tp->timer);
5717 }
5718
5719 static int tg3_open(struct net_device *dev)
5720 {
5721         struct tg3 *tp = netdev_priv(dev);
5722         int err;
5723
5724         spin_lock_irq(&tp->lock);
5725         spin_lock(&tp->tx_lock);
5726
5727         tg3_disable_ints(tp);
5728         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5729
5730         spin_unlock(&tp->tx_lock);
5731         spin_unlock_irq(&tp->lock);
5732
5733         /* The placement of this call is tied
5734          * to the setup and use of Host TX descriptors.
5735          */
5736         err = tg3_alloc_consistent(tp);
5737         if (err)
5738                 return err;
5739
5740         err = request_irq(dev->irq, tg3_interrupt,
5741                           SA_SHIRQ, dev->name, dev);
5742
5743         if (err) {
5744                 tg3_free_consistent(tp);
5745                 return err;
5746         }
5747
5748         spin_lock_irq(&tp->lock);
5749         spin_lock(&tp->tx_lock);
5750
5751         err = tg3_init_hw(tp);
5752         if (err) {
5753                 tg3_halt(tp);
5754                 tg3_free_rings(tp);
5755         } else {
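                     /* The timer fires every HZ/10 (100ms): a counter of 10
                      * gives the once-per-second link check, 10 * 120 the
                      * 120-second ASF heartbeat.
                      */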
5756                 tp->timer_offset = HZ / 10;
5757                 tp->timer_counter = tp->timer_multiplier = 10;
5758                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5759
5760                 init_timer(&tp->timer);
5761                 tp->timer.expires = jiffies + tp->timer_offset;
5762                 tp->timer.data = (unsigned long) tp;
5763                 tp->timer.function = tg3_timer;
5764                 add_timer(&tp->timer);
5765
5766                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5767         }
5768
5769         spin_unlock(&tp->tx_lock);
5770         spin_unlock_irq(&tp->lock);
5771
5772         if (err) {
5773                 free_irq(dev->irq, dev);
5774                 tg3_free_consistent(tp);
5775                 return err;
5776         }
5777
5778         spin_lock_irq(&tp->lock);
5779         spin_lock(&tp->tx_lock);
5780
5781         tg3_enable_ints(tp);
5782
5783         spin_unlock(&tp->tx_lock);
5784         spin_unlock_irq(&tp->lock);
5785
5786         netif_start_queue(dev);
5787
5788         return 0;
5789 }
5790
5791 #if 0
5792 /*static*/ void tg3_dump_state(struct tg3 *tp)
5793 {
5794         u32 val32, val32_2, val32_3, val32_4, val32_5;
5795         u16 val16;
5796         int i;
5797
5798         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5799         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5800         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5801                val16, val32);
5802
5803         /* MAC block */
5804         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5805                tr32(MAC_MODE), tr32(MAC_STATUS));
5806         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5807                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5808         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5809                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5810         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5811                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5812
5813         /* Send data initiator control block */
5814         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5815                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5816         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5817                tr32(SNDDATAI_STATSCTRL));
5818
5819         /* Send data completion control block */
5820         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5821
5822         /* Send BD ring selector block */
5823         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5824                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5825
5826         /* Send BD initiator control block */
5827         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5828                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5829
5830         /* Send BD completion control block */
5831         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5832
5833         /* Receive list placement control block */
5834         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5835                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5836         printk("       RCVLPC_STATSCTRL[%08x]\n",
5837                tr32(RCVLPC_STATSCTRL));
5838
5839         /* Receive data and receive BD initiator control block */
5840         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5841                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5842
5843         /* Receive data completion control block */
5844         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5845                tr32(RCVDCC_MODE));
5846
5847         /* Receive BD initiator control block */
5848         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5849                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5850
5851         /* Receive BD completion control block */
5852         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5853                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5854
5855         /* Receive list selector control block */
5856         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5857                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5858
5859         /* Mbuf cluster free block */
5860         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5861                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5862
5863         /* Host coalescing control block */
5864         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5865                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5866         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5867                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5868                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5869         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5870                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5871                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5872         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5873                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5874         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5875                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5876
5877         /* Memory arbiter control block */
5878         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5879                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5880
5881         /* Buffer manager control block */
5882         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5883                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5884         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5885                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5886         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5887                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5888                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5889                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5890
5891         /* Read DMA control block */
5892         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5893                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5894
5895         /* Write DMA control block */
5896         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5897                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5898
5899         /* DMA completion block */
5900         printk("DEBUG: DMAC_MODE[%08x]\n",
5901                tr32(DMAC_MODE));
5902
5903         /* GRC block */
5904         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5905                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5906         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5907                tr32(GRC_LOCAL_CTRL));
5908
5909         /* TG3_BDINFOs */
5910         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5911                tr32(RCVDBDI_JUMBO_BD + 0x0),
5912                tr32(RCVDBDI_JUMBO_BD + 0x4),
5913                tr32(RCVDBDI_JUMBO_BD + 0x8),
5914                tr32(RCVDBDI_JUMBO_BD + 0xc));
5915         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5916                tr32(RCVDBDI_STD_BD + 0x0),
5917                tr32(RCVDBDI_STD_BD + 0x4),
5918                tr32(RCVDBDI_STD_BD + 0x8),
5919                tr32(RCVDBDI_STD_BD + 0xc));
5920         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5921                tr32(RCVDBDI_MINI_BD + 0x0),
5922                tr32(RCVDBDI_MINI_BD + 0x4),
5923                tr32(RCVDBDI_MINI_BD + 0x8),
5924                tr32(RCVDBDI_MINI_BD + 0xc));
5925
5926         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5927         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5928         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5929         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5930         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5931                val32, val32_2, val32_3, val32_4);
5932
5933         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5934         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5935         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5936         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5937         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5938                val32, val32_2, val32_3, val32_4);
5939
5940         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5941         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5942         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5943         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5944         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5945         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5946                val32, val32_2, val32_3, val32_4, val32_5);
5947
5948         /* SW status block */
5949         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5950                tp->hw_status->status,
5951                tp->hw_status->status_tag,
5952                tp->hw_status->rx_jumbo_consumer,
5953                tp->hw_status->rx_consumer,
5954                tp->hw_status->rx_mini_consumer,
5955                tp->hw_status->idx[0].rx_producer,
5956                tp->hw_status->idx[0].tx_consumer);
5957
5958         /* SW statistics block */
5959         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5960                ((u32 *)tp->hw_stats)[0],
5961                ((u32 *)tp->hw_stats)[1],
5962                ((u32 *)tp->hw_stats)[2],
5963                ((u32 *)tp->hw_stats)[3]);
5964
5965         /* Mailboxes */
5966         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5967                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5968                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5969                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5970                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5971
5972         /* NIC side send descriptors. */
5973         for (i = 0; i < 6; i++) {
5974                 unsigned long txd;
5975
5976                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5977                         + (i * sizeof(struct tg3_tx_buffer_desc));
5978                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5979                        i,
5980                        readl(txd + 0x0), readl(txd + 0x4),
5981                        readl(txd + 0x8), readl(txd + 0xc));
5982         }
5983
5984         /* NIC side RX descriptors. */
5985         for (i = 0; i < 6; i++) {
5986                 unsigned long rxd;
5987
5988                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5989                         + (i * sizeof(struct tg3_rx_buffer_desc));
5990                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5991                        i,
5992                        readl(rxd + 0x0), readl(rxd + 0x4),
5993                        readl(rxd + 0x8), readl(rxd + 0xc));
5994                 rxd += (4 * sizeof(u32));
5995                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5996                        i,
5997                        readl(rxd + 0x0), readl(rxd + 0x4),
5998                        readl(rxd + 0x8), readl(rxd + 0xc));
5999         }
6000
6001         for (i = 0; i < 6; i++) {
6002                 unsigned long rxd;
6003
6004                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6005                         + (i * sizeof(struct tg3_rx_buffer_desc));
6006                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6007                        i,
6008                        readl(rxd + 0x0), readl(rxd + 0x4),
6009                        readl(rxd + 0x8), readl(rxd + 0xc));
6010                 rxd += (4 * sizeof(u32));
6011                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6012                        i,
6013                        readl(rxd + 0x0), readl(rxd + 0x4),
6014                        readl(rxd + 0x8), readl(rxd + 0xc));
6015         }
6016 }
6017 #endif
6018
6019 static struct net_device_stats *tg3_get_stats(struct net_device *);
6020 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6021
6022 static int tg3_close(struct net_device *dev)
6023 {
6024         struct tg3 *tp = netdev_priv(dev);
6025
6026         netif_stop_queue(dev);
6027
6028         del_timer_sync(&tp->timer);
6029
6030         spin_lock_irq(&tp->lock);
6031         spin_lock(&tp->tx_lock);
6032 #if 0
6033         tg3_dump_state(tp);
6034 #endif
6035
6036         tg3_disable_ints(tp);
6037
6038         tg3_halt(tp);
6039         tg3_free_rings(tp);
6040         tp->tg3_flags &=
6041                 ~(TG3_FLAG_INIT_COMPLETE |
6042                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6043         netif_carrier_off(tp->dev);
6044
6045         spin_unlock(&tp->tx_lock);
6046         spin_unlock_irq(&tp->lock);
6047
6048         free_irq(dev->irq, dev);
6049
6050         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6051                sizeof(tp->net_stats_prev));
6052         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6053                sizeof(tp->estats_prev));
6054
6055         tg3_free_consistent(tp);
6056
6057         return 0;
6058 }
6059
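     /* On 32-bit hosts only the low word of a 64-bit hardware counter is
      * reported; 64-bit hosts get the full value.
      */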
6060 static inline unsigned long get_stat64(tg3_stat64_t *val)
6061 {
6062         unsigned long ret;
6063
6064 #if (BITS_PER_LONG == 32)
6065         ret = val->low;
6066 #else
6067         ret = ((u64)val->high << 32) | ((u64)val->low);
6068 #endif
6069         return ret;
6070 }
6071
6072 static unsigned long calc_crc_errors(struct tg3 *tp)
6073 {
6074         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6075
6076         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6077             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6078              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6079                 unsigned long flags;
6080                 u32 val;
6081
6082                 spin_lock_irqsave(&tp->lock, flags);
6083                 if (!tg3_readphy(tp, 0x1e, &val)) {
6084                         tg3_writephy(tp, 0x1e, val | 0x8000);
6085                         tg3_readphy(tp, 0x14, &val);
6086                 } else
6087                         val = 0;
6088                 spin_unlock_irqrestore(&tp->lock, flags);
6089
6090                 tp->phy_crc_errors += val;
6091
6092                 return tp->phy_crc_errors;
6093         }
6094
6095         return get_stat64(&hw_stats->rx_fcs_errors);
6096 }
6097
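     /* Each ethtool counter is the snapshot saved at the last tg3_close
      * plus the current hardware block value, so counts persist across
      * down/up cycles.
      */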
6098 #define ESTAT_ADD(member) \
6099         estats->member =        old_estats->member + \
6100                                 get_stat64(&hw_stats->member)
6101
6102 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6103 {
6104         struct tg3_ethtool_stats *estats = &tp->estats;
6105         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6106         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6107
6108         if (!hw_stats)
6109                 return old_estats;
6110
6111         ESTAT_ADD(rx_octets);
6112         ESTAT_ADD(rx_fragments);
6113         ESTAT_ADD(rx_ucast_packets);
6114         ESTAT_ADD(rx_mcast_packets);
6115         ESTAT_ADD(rx_bcast_packets);
6116         ESTAT_ADD(rx_fcs_errors);
6117         ESTAT_ADD(rx_align_errors);
6118         ESTAT_ADD(rx_xon_pause_rcvd);
6119         ESTAT_ADD(rx_xoff_pause_rcvd);
6120         ESTAT_ADD(rx_mac_ctrl_rcvd);
6121         ESTAT_ADD(rx_xoff_entered);
6122         ESTAT_ADD(rx_frame_too_long_errors);
6123         ESTAT_ADD(rx_jabbers);
6124         ESTAT_ADD(rx_undersize_packets);
6125         ESTAT_ADD(rx_in_length_errors);
6126         ESTAT_ADD(rx_out_length_errors);
6127         ESTAT_ADD(rx_64_or_less_octet_packets);
6128         ESTAT_ADD(rx_65_to_127_octet_packets);
6129         ESTAT_ADD(rx_128_to_255_octet_packets);
6130         ESTAT_ADD(rx_256_to_511_octet_packets);
6131         ESTAT_ADD(rx_512_to_1023_octet_packets);
6132         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6133         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6134         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6135         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6136         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6137
6138         ESTAT_ADD(tx_octets);
6139         ESTAT_ADD(tx_collisions);
6140         ESTAT_ADD(tx_xon_sent);
6141         ESTAT_ADD(tx_xoff_sent);
6142         ESTAT_ADD(tx_flow_control);
6143         ESTAT_ADD(tx_mac_errors);
6144         ESTAT_ADD(tx_single_collisions);
6145         ESTAT_ADD(tx_mult_collisions);
6146         ESTAT_ADD(tx_deferred);
6147         ESTAT_ADD(tx_excessive_collisions);
6148         ESTAT_ADD(tx_late_collisions);
6149         ESTAT_ADD(tx_collide_2times);
6150         ESTAT_ADD(tx_collide_3times);
6151         ESTAT_ADD(tx_collide_4times);
6152         ESTAT_ADD(tx_collide_5times);
6153         ESTAT_ADD(tx_collide_6times);
6154         ESTAT_ADD(tx_collide_7times);
6155         ESTAT_ADD(tx_collide_8times);
6156         ESTAT_ADD(tx_collide_9times);
6157         ESTAT_ADD(tx_collide_10times);
6158         ESTAT_ADD(tx_collide_11times);
6159         ESTAT_ADD(tx_collide_12times);
6160         ESTAT_ADD(tx_collide_13times);
6161         ESTAT_ADD(tx_collide_14times);
6162         ESTAT_ADD(tx_collide_15times);
6163         ESTAT_ADD(tx_ucast_packets);
6164         ESTAT_ADD(tx_mcast_packets);
6165         ESTAT_ADD(tx_bcast_packets);
6166         ESTAT_ADD(tx_carrier_sense_errors);
6167         ESTAT_ADD(tx_discards);
6168         ESTAT_ADD(tx_errors);
6169
6170         ESTAT_ADD(dma_writeq_full);
6171         ESTAT_ADD(dma_write_prioq_full);
6172         ESTAT_ADD(rxbds_empty);
6173         ESTAT_ADD(rx_discards);
6174         ESTAT_ADD(rx_errors);
6175         ESTAT_ADD(rx_threshold_hit);
6176
6177         ESTAT_ADD(dma_readq_full);
6178         ESTAT_ADD(dma_read_prioq_full);
6179         ESTAT_ADD(tx_comp_queue_full);
6180
6181         ESTAT_ADD(ring_set_send_prod_index);
6182         ESTAT_ADD(ring_status_update);
6183         ESTAT_ADD(nic_irqs);
6184         ESTAT_ADD(nic_avoided_irqs);
6185         ESTAT_ADD(nic_tx_threshold_hit);
6186
6187         return estats;
6188 }
6189
6190 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6191 {
6192         struct tg3 *tp = netdev_priv(dev);
6193         struct net_device_stats *stats = &tp->net_stats;
6194         struct net_device_stats *old_stats = &tp->net_stats_prev;
6195         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6196
6197         if (!hw_stats)
6198                 return old_stats;
6199
6200         stats->rx_packets = old_stats->rx_packets +
6201                 get_stat64(&hw_stats->rx_ucast_packets) +
6202                 get_stat64(&hw_stats->rx_mcast_packets) +
6203                 get_stat64(&hw_stats->rx_bcast_packets);
6204
6205         stats->tx_packets = old_stats->tx_packets +
6206                 get_stat64(&hw_stats->tx_ucast_packets) +
6207                 get_stat64(&hw_stats->tx_mcast_packets) +
6208                 get_stat64(&hw_stats->tx_bcast_packets);
6209
6210         stats->rx_bytes = old_stats->rx_bytes +
6211                 get_stat64(&hw_stats->rx_octets);
6212         stats->tx_bytes = old_stats->tx_bytes +
6213                 get_stat64(&hw_stats->tx_octets);
6214
6215         stats->rx_errors = old_stats->rx_errors +
6216                 get_stat64(&hw_stats->rx_errors) +
6217                 get_stat64(&hw_stats->rx_discards);
6218         stats->tx_errors = old_stats->tx_errors +
6219                 get_stat64(&hw_stats->tx_errors) +
6220                 get_stat64(&hw_stats->tx_mac_errors) +
6221                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6222                 get_stat64(&hw_stats->tx_discards);
6223
6224         stats->multicast = old_stats->multicast +
6225                 get_stat64(&hw_stats->rx_mcast_packets);
6226         stats->collisions = old_stats->collisions +
6227                 get_stat64(&hw_stats->tx_collisions);
6228
6229         stats->rx_length_errors = old_stats->rx_length_errors +
6230                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6231                 get_stat64(&hw_stats->rx_undersize_packets);
6232
6233         stats->rx_over_errors = old_stats->rx_over_errors +
6234                 get_stat64(&hw_stats->rxbds_empty);
6235         stats->rx_frame_errors = old_stats->rx_frame_errors +
6236                 get_stat64(&hw_stats->rx_align_errors);
6237         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6238                 get_stat64(&hw_stats->tx_discards);
6239         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6240                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6241
6242         stats->rx_crc_errors = old_stats->rx_crc_errors +
6243                 calc_crc_errors(tp);
6244
6245         return stats;
6246 }
6247
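     /* Bitwise Ethernet CRC-32 (reflected, polynomial 0xedb88320), used
      * below to hash multicast addresses into the 128-bit MAC hash filter.
      */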
6248 static inline u32 calc_crc(unsigned char *buf, int len)
6249 {
6250         u32 reg;
6251         u32 tmp;
6252         int j, k;
6253
6254         reg = 0xffffffff;
6255
6256         for (j = 0; j < len; j++) {
6257                 reg ^= buf[j];
6258
6259                 for (k = 0; k < 8; k++) {
6260                         tmp = reg & 0x01;
6261
6262                         reg >>= 1;
6263
6264                         if (tmp) {
6265                                 reg ^= 0xedb88320;
6266                         }
6267                 }
6268         }
6269
6270         return ~reg;
6271 }
6272
6273 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6274 {
6275         /* accept or reject all multicast frames */
6276         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6277         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6278         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6279         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6280 }
6281
6282 static void __tg3_set_rx_mode(struct net_device *dev)
6283 {
6284         struct tg3 *tp = netdev_priv(dev);
6285         u32 rx_mode;
6286
6287         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6288                                   RX_MODE_KEEP_VLAN_TAG);
6289
6290         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6291          * flag clear.
6292          */
6293 #if TG3_VLAN_TAG_USED
6294         if (!tp->vlgrp &&
6295             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6296                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6297 #else
6298         /* By definition, VLAN is always disabled in this
6299          * case.
6300          */
6301         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6302                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6303 #endif
6304
6305         if (dev->flags & IFF_PROMISC) {
6306                 /* Promiscuous mode. */
6307                 rx_mode |= RX_MODE_PROMISC;
6308         } else if (dev->flags & IFF_ALLMULTI) {
6309                 /* Accept all multicast. */
6310                 tg3_set_multi (tp, 1);
6311         } else if (dev->mc_count < 1) {
6312                 /* Reject all multicast. */
6313                 tg3_set_multi (tp, 0);
6314         } else {
6315                 /* Accept one or more multicast(s). */
6316                 struct dev_mc_list *mclist;
6317                 unsigned int i;
6318                 u32 mc_filter[4] = { 0, };
6319                 u32 regidx;
6320                 u32 bit;
6321                 u32 crc;
6322
6323                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6324                      i++, mclist = mclist->next) {
6325
6326                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6327                         bit = ~crc & 0x7f;
6328                         regidx = (bit & 0x60) >> 5;
6329                         bit &= 0x1f;
6330                         mc_filter[regidx] |= (1 << bit);
6331                 }
6332
6333                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6334                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6335                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6336                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6337         }
6338
6339         if (rx_mode != tp->rx_mode) {
6340                 tp->rx_mode = rx_mode;
6341                 tw32_f(MAC_RX_MODE, rx_mode);
6342                 udelay(10);
6343         }
6344 }
6345
6346 static void tg3_set_rx_mode(struct net_device *dev)
6347 {
6348         struct tg3 *tp = netdev_priv(dev);
6349
6350         spin_lock_irq(&tp->lock);
6351         spin_lock(&tp->tx_lock);
6352         __tg3_set_rx_mode(dev);
6353         spin_unlock(&tp->tx_lock);
6354         spin_unlock_irq(&tp->lock);
6355 }
6356
6357 #define TG3_REGDUMP_LEN         (32 * 1024)
6358
6359 static int tg3_get_regs_len(struct net_device *dev)
6360 {
6361         return TG3_REGDUMP_LEN;
6362 }
6363
6364 static void tg3_get_regs(struct net_device *dev,
6365                 struct ethtool_regs *regs, void *_p)
6366 {
6367         u32 *p = _p;
6368         struct tg3 *tp = netdev_priv(dev);
6369         u8 *orig_p = _p;
6370         int i;
6371
6372         regs->version = 0;
6373
6374         memset(p, 0, TG3_REGDUMP_LEN);
6375
6376         spin_lock_irq(&tp->lock);
6377         spin_lock(&tp->tx_lock);
6378
6379 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6380 #define GET_REG32_LOOP(base,len)                \
6381 do {    p = (u32 *)(orig_p + (base));           \
6382         for (i = 0; i < len; i += 4)            \
6383                 __GET_REG32((base) + i);        \
6384 } while (0)
6385 #define GET_REG32_1(reg)                        \
6386 do {    p = (u32 *)(orig_p + (reg));            \
6387         __GET_REG32((reg));                     \
6388 } while (0)
6389
6390         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6391         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6392         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6393         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6394         GET_REG32_1(SNDDATAC_MODE);
6395         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6396         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6397         GET_REG32_1(SNDBDC_MODE);
6398         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6399         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6400         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6401         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6402         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6403         GET_REG32_1(RCVDCC_MODE);
6404         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6405         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6406         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6407         GET_REG32_1(MBFREE_MODE);
6408         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6409         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6410         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6411         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6412         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6413         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6414         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6415         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6416         GET_REG32_LOOP(FTQ_RESET, 0x120);
6417         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6418         GET_REG32_1(DMAC_MODE);
6419         GET_REG32_LOOP(GRC_MODE, 0x4c);
6420         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6421                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6422
6423 #undef __GET_REG32
6424 #undef GET_REG32_LOOP
6425 #undef GET_REG32_1
6426
6427         spin_unlock(&tp->tx_lock);
6428         spin_unlock_irq(&tp->lock);
6429 }
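
     /* The register dump produced above is a sparse 32 kB image: each
      * GET_REG32_LOOP()/GET_REG32_1() block is written at its own hardware
      * offset inside the memset()-cleared buffer, so offsets that are never
      * read back simply stay zero in the dump handed to ethtool.
      */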
6430
6431 static int tg3_get_eeprom_len(struct net_device *dev)
6432 {
6433         struct tg3 *tp = netdev_priv(dev);
6434
6435         return tp->nvram_size;
6436 }
6437
6438 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6439
6440 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6441 {
6442         struct tg3 *tp = netdev_priv(dev);
6443         int ret;
6444         u8  *pd;
6445         u32 i, offset, len, val, b_offset, b_count;
6446
6447         offset = eeprom->offset;
6448         len = eeprom->len;
6449         eeprom->len = 0;
6450
6451         eeprom->magic = TG3_EEPROM_MAGIC;
6452
6453         if (offset & 3) {
6454                 /* adjustments to start on required 4 byte boundary */
6455                 b_offset = offset & 3;
6456                 b_count = 4 - b_offset;
6457                 if (b_count > len) {
6458                         /* i.e. offset=1 len=2 */
6459                         b_count = len;
6460                 }
6461                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6462                 if (ret)
6463                         return ret;
6464                 val = cpu_to_le32(val);
6465                 memcpy(data, ((char*)&val) + b_offset, b_count);
6466                 len -= b_count;
6467                 offset += b_count;
6468                 eeprom->len += b_count;
6469         }
6470
6471         /* read bytes up to the last 4 byte boundary */
6472         pd = &data[eeprom->len];
6473         for (i = 0; i < (len - (len & 3)); i += 4) {
6474                 ret = tg3_nvram_read(tp, offset + i, &val);
6475                 if (ret) {
6476                         eeprom->len += i;
6477                         return ret;
6478                 }
6479                 val = cpu_to_le32(val);
6480                 memcpy(pd + i, &val, 4);
6481         }
6482         eeprom->len += i;
6483
6484         if (len & 3) {
6485                 /* read last bytes not ending on 4 byte boundary */
6486                 pd = &data[eeprom->len];
6487                 b_count = len & 3;
6488                 b_offset = offset + len - b_count;
6489                 ret = tg3_nvram_read(tp, b_offset, &val);
6490                 if (ret)
6491                         return ret;
6492                 val = cpu_to_le32(val);
6493                 memcpy(pd, ((char*)&val), b_count);
6494                 eeprom->len += b_count;
6495         }
6496         return 0;
6497 }
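
     /* Illustration of the alignment handling above: a request with
      * offset=5, len=10 is served as three word reads -- the word at 4
      * supplies bytes 5..7, the word at 8 is copied whole, and the word
      * at 12 supplies bytes 12..14 -- so exactly 10 bytes land in 'data'
      * even though NVRAM is only readable in 32-bit units.
      */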
6498
6499 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6500
6501 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6502 {
6503         struct tg3 *tp = netdev_priv(dev);
6504         int ret;
6505         u32 offset, len, b_offset, odd_len, start, end;
6506         u8 *buf;
6507
6508         if (eeprom->magic != TG3_EEPROM_MAGIC)
6509                 return -EINVAL;
6510
6511         offset = eeprom->offset;
6512         len = eeprom->len;
6513
6514         if ((b_offset = (offset & 3))) {
6515                 /* adjustments to start on required 4 byte boundary */
6516                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6517                 if (ret)
6518                         return ret;
6519                 start = cpu_to_le32(start);
6520                 len += b_offset;
6521                 offset &= ~3;
6522         }
6523
6524         odd_len = 0;
6525         if ((len & 3) && ((len > 4) || (b_offset == 0))) {
6526                 /* adjustments to end on required 4 byte boundary */
6527                 odd_len = 1;
6528                 len = (len + 3) & ~3;
6529                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6530                 if (ret)
6531                         return ret;
6532                 end = cpu_to_le32(end);
6533         }
6534
6535         buf = data;
6536         if (b_offset || odd_len) {
6537                 buf = kmalloc(len, GFP_KERNEL);
6538                 if (!buf)
6539                         return -ENOMEM;
6540                 if (b_offset)
6541                         memcpy(buf, &start, 4);
6542                 if (odd_len)
6543                         memcpy(buf+len-4, &end, 4);
6544                 memcpy(buf + b_offset, data, eeprom->len);
6545         }
6546
6547         ret = tg3_nvram_write_block(tp, offset, len, buf);
6548
6549         if (buf != data)
6550                 kfree(buf);
6551
6552         return ret;
6553 }
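
     /* Writes that are not 32-bit aligned are widened above into a bounce
      * buffer: the partial first and/or last words are read back from
      * NVRAM beforehand, so bytes the caller did not supply are preserved
      * by the resulting read-modify-write.
      */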
6554
6555 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6556 {
6557         struct tg3 *tp = netdev_priv(dev);
6558   
6559         cmd->supported = (SUPPORTED_Autoneg);
6560
6561         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6562                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6563                                    SUPPORTED_1000baseT_Full);
6564
6565         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6566                 cmd->supported |= (SUPPORTED_100baseT_Half |
6567                                   SUPPORTED_100baseT_Full |
6568                                   SUPPORTED_10baseT_Half |
6569                                   SUPPORTED_10baseT_Full |
6570                                   SUPPORTED_MII);
6571         else
6572                 cmd->supported |= SUPPORTED_FIBRE;
6573   
6574         cmd->advertising = tp->link_config.advertising;
6575         if (netif_running(dev)) {
6576                 cmd->speed = tp->link_config.active_speed;
6577                 cmd->duplex = tp->link_config.active_duplex;
6578         }
6579         cmd->port = 0;
6580         cmd->phy_address = PHY_ADDR;
6581         cmd->transceiver = 0;
6582         cmd->autoneg = tp->link_config.autoneg;
6583         cmd->maxtxpkt = 0;
6584         cmd->maxrxpkt = 0;
6585         return 0;
6586 }
6587   
6588 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6589 {
6590         struct tg3 *tp = netdev_priv(dev);
6591   
6592         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6593                 /* These are the only valid advertisement bits allowed.  */
6594                 if (cmd->autoneg == AUTONEG_ENABLE &&
6595                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6596                                           ADVERTISED_1000baseT_Full |
6597                                           ADVERTISED_Autoneg |
6598                                           ADVERTISED_FIBRE)))
6599                         return -EINVAL;
6600         }
6601
6602         spin_lock_irq(&tp->lock);
6603         spin_lock(&tp->tx_lock);
6604
6605         tp->link_config.autoneg = cmd->autoneg;
6606         if (cmd->autoneg == AUTONEG_ENABLE) {
6607                 tp->link_config.advertising = cmd->advertising;
6608                 tp->link_config.speed = SPEED_INVALID;
6609                 tp->link_config.duplex = DUPLEX_INVALID;
6610         } else {
6611                 tp->link_config.advertising = 0;
6612                 tp->link_config.speed = cmd->speed;
6613                 tp->link_config.duplex = cmd->duplex;
6614         }
6615   
6616         if (netif_running(dev))
6617                 tg3_setup_phy(tp, 1);
6618
6619         spin_unlock(&tp->tx_lock);
6620         spin_unlock_irq(&tp->lock);
6621   
6622         return 0;
6623 }
6624   
6625 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6626 {
6627         struct tg3 *tp = netdev_priv(dev);
6628   
6629         strcpy(info->driver, DRV_MODULE_NAME);
6630         strcpy(info->version, DRV_MODULE_VERSION);
6631         strcpy(info->bus_info, pci_name(tp->pdev));
6632 }
6633   
6634 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6635 {
6636         struct tg3 *tp = netdev_priv(dev);
6637   
6638         wol->supported = WAKE_MAGIC;
6639         wol->wolopts = 0;
6640         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6641                 wol->wolopts = WAKE_MAGIC;
6642         memset(&wol->sopass, 0, sizeof(wol->sopass));
6643 }
6644   
6645 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6646 {
6647         struct tg3 *tp = netdev_priv(dev);
6648   
6649         if (wol->wolopts & ~WAKE_MAGIC)
6650                 return -EINVAL;
6651         if ((wol->wolopts & WAKE_MAGIC) &&
6652             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6653             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6654                 return -EINVAL;
6655   
6656         spin_lock_irq(&tp->lock);
6657         if (wol->wolopts & WAKE_MAGIC)
6658                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6659         else
6660                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6661         spin_unlock_irq(&tp->lock);
6662   
6663         return 0;
6664 }
6665   
6666 static u32 tg3_get_msglevel(struct net_device *dev)
6667 {
6668         struct tg3 *tp = netdev_priv(dev);
6669         return tp->msg_enable;
6670 }
6671   
6672 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6673 {
6674         struct tg3 *tp = netdev_priv(dev);
6675         tp->msg_enable = value;
6676 }
6677   
6678 #if TG3_TSO_SUPPORT != 0
6679 static int tg3_set_tso(struct net_device *dev, u32 value)
6680 {
6681         struct tg3 *tp = netdev_priv(dev);
6682
6683         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6684                 if (value)
6685                         return -EINVAL;
6686                 return 0;
6687         }
6688         return ethtool_op_set_tso(dev, value);
6689 }
6690 #endif
6691   
6692 static int tg3_nway_reset(struct net_device *dev)
6693 {
6694         struct tg3 *tp = netdev_priv(dev);
6695         u32 bmcr;
6696         int r;
6697   
6698         if (!netif_running(dev))
6699                 return -EAGAIN;
6700
6701         spin_lock_irq(&tp->lock);
6702         r = -EINVAL;
6703         tg3_readphy(tp, MII_BMCR, &bmcr);
6704         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6705             (bmcr & BMCR_ANENABLE)) {
6706                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6707                 r = 0;
6708         }
6709         spin_unlock_irq(&tp->lock);
6710   
6711         return r;
6712 }
6713   
6714 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6715 {
6716         struct tg3 *tp = netdev_priv(dev);
6717   
6718         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6719         ering->rx_mini_max_pending = 0;
6720         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6721
6722         ering->rx_pending = tp->rx_pending;
6723         ering->rx_mini_pending = 0;
6724         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6725         ering->tx_pending = tp->tx_pending;
6726 }
6727   
6728 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6729 {
6730         struct tg3 *tp = netdev_priv(dev);
6731   
6732         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6733             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6734             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6735                 return -EINVAL;
6736   
6737         if (netif_running(dev))
6738                 tg3_netif_stop(tp);
6739
6740         spin_lock_irq(&tp->lock);
6741         spin_lock(&tp->tx_lock);
6742   
6743         tp->rx_pending = ering->rx_pending;
6744
6745         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6746             tp->rx_pending > 63)
6747                 tp->rx_pending = 63;
6748         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6749         tp->tx_pending = ering->tx_pending;
6750
6751         if (netif_running(dev)) {
6752                 tg3_halt(tp);
6753                 tg3_init_hw(tp);
6754                 tg3_netif_start(tp);
6755         }
6756
6757         spin_unlock(&tp->tx_lock);
6758         spin_unlock_irq(&tp->lock);
6759   
6760         return 0;
6761 }
6762   
6763 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6764 {
6765         struct tg3 *tp = netdev_priv(dev);
6766   
6767         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6768         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6769         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6770 }
6771   
6772 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6773 {
6774         struct tg3 *tp = netdev_priv(dev);
6775   
6776         if (netif_running(dev))
6777                 tg3_netif_stop(tp);
6778
6779         spin_lock_irq(&tp->lock);
6780         spin_lock(&tp->tx_lock);
6781         if (epause->autoneg)
6782                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6783         else
6784                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6785         if (epause->rx_pause)
6786                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6787         else
6788                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6789         if (epause->tx_pause)
6790                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6791         else
6792                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6793
6794         if (netif_running(dev)) {
6795                 tg3_halt(tp);
6796                 tg3_init_hw(tp);
6797                 tg3_netif_start(tp);
6798         }
6799         spin_unlock(&tp->tx_lock);
6800         spin_unlock_irq(&tp->lock);
6801   
6802         return 0;
6803 }
6804   
6805 static u32 tg3_get_rx_csum(struct net_device *dev)
6806 {
6807         struct tg3 *tp = netdev_priv(dev);
6808         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6809 }
6810   
6811 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6812 {
6813         struct tg3 *tp = netdev_priv(dev);
6814   
6815         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6816                 if (data != 0)
6817                         return -EINVAL;
6818                 return 0;
6819         }
6820   
6821         spin_lock_irq(&tp->lock);
6822         if (data)
6823                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6824         else
6825                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6826         spin_unlock_irq(&tp->lock);
6827   
6828         return 0;
6829 }
6830   
6831 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6832 {
6833         struct tg3 *tp = netdev_priv(dev);
6834   
6835         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6836                 if (data != 0)
6837                         return -EINVAL;
6838                 return 0;
6839         }
6840   
6841         if (data)
6842                 dev->features |= NETIF_F_IP_CSUM;
6843         else
6844                 dev->features &= ~NETIF_F_IP_CSUM;
6845
6846         return 0;
6847 }
6848
6849 static int tg3_get_stats_count (struct net_device *dev)
6850 {
6851         return TG3_NUM_STATS;
6852 }
6853
6854 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6855 {
6856         switch (stringset) {
6857         case ETH_SS_STATS:
6858                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6859                 break;
6860         default:
6861                 WARN_ON(1);     /* we need a WARN() */
6862                 break;
6863         }
6864 }
6865
6866 static void tg3_get_ethtool_stats (struct net_device *dev,
6867                                    struct ethtool_stats *estats, u64 *tmp_stats)
6868 {
6869         struct tg3 *tp = netdev_priv(dev);
6870         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6871 }
6872
6873 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6874 {
6875         struct mii_ioctl_data *data = if_mii(ifr);
6876         struct tg3 *tp = netdev_priv(dev);
6877         int err;
6878
6879         switch(cmd) {
6880         case SIOCGMIIPHY:
6881                 data->phy_id = PHY_ADDR;
6882
6883                 /* fallthru */
6884         case SIOCGMIIREG: {
6885                 u32 mii_regval;
6886
6887                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6888                         break;                  /* We have no PHY */
6889
6890                 spin_lock_irq(&tp->lock);
6891                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6892                 spin_unlock_irq(&tp->lock);
6893
6894                 data->val_out = mii_regval;
6895
6896                 return err;
6897         }
6898
6899         case SIOCSMIIREG:
6900                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6901                         break;                  /* We have no PHY */
6902
6903                 if (!capable(CAP_NET_ADMIN))
6904                         return -EPERM;
6905
6906                 spin_lock_irq(&tp->lock);
6907                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6908                 spin_unlock_irq(&tp->lock);
6909
6910                 return err;
6911
6912         default:
6913                 /* do nothing */
6914                 break;
6915         }
6916         return -EOPNOTSUPP;
6917 }
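
     /* Userspace reaches the MII ioctls above via SIOCGMIIPHY/SIOCGMIIREG
      * on an ordinary socket, roughly as mii-tool does (sketch only, not
      * part of the driver):
      *
      *     struct ifreq ifr;
      *     struct mii_ioctl_data *mii =
      *             (struct mii_ioctl_data *)&ifr.ifr_data;
      *
      *     strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
      *     ioctl(sock_fd, SIOCGMIIPHY, &ifr);    - fills mii->phy_id
      *     mii->reg_num = MII_BMSR;
      *     ioctl(sock_fd, SIOCGMIIREG, &ifr);    - BMSR value in mii->val_out
      */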
6918
6919 #if TG3_VLAN_TAG_USED
6920 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6921 {
6922         struct tg3 *tp = netdev_priv(dev);
6923
6924         spin_lock_irq(&tp->lock);
6925         spin_lock(&tp->tx_lock);
6926
6927         tp->vlgrp = grp;
6928
6929         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6930         __tg3_set_rx_mode(dev);
6931
6932         spin_unlock(&tp->tx_lock);
6933         spin_unlock_irq(&tp->lock);
6934 }
6935
6936 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6937 {
6938         struct tg3 *tp = netdev_priv(dev);
6939
6940         spin_lock_irq(&tp->lock);
6941         spin_lock(&tp->tx_lock);
6942         if (tp->vlgrp)
6943                 tp->vlgrp->vlan_devices[vid] = NULL;
6944         spin_unlock(&tp->tx_lock);
6945         spin_unlock_irq(&tp->lock);
6946 }
6947 #endif
6948
6949 static struct ethtool_ops tg3_ethtool_ops = {
6950         .get_settings           = tg3_get_settings,
6951         .set_settings           = tg3_set_settings,
6952         .get_drvinfo            = tg3_get_drvinfo,
6953         .get_regs_len           = tg3_get_regs_len,
6954         .get_regs               = tg3_get_regs,
6955         .get_wol                = tg3_get_wol,
6956         .set_wol                = tg3_set_wol,
6957         .get_msglevel           = tg3_get_msglevel,
6958         .set_msglevel           = tg3_set_msglevel,
6959         .nway_reset             = tg3_nway_reset,
6960         .get_link               = ethtool_op_get_link,
6961         .get_eeprom_len         = tg3_get_eeprom_len,
6962         .get_eeprom             = tg3_get_eeprom,
6963         .set_eeprom             = tg3_set_eeprom,
6964         .get_ringparam          = tg3_get_ringparam,
6965         .set_ringparam          = tg3_set_ringparam,
6966         .get_pauseparam         = tg3_get_pauseparam,
6967         .set_pauseparam         = tg3_set_pauseparam,
6968         .get_rx_csum            = tg3_get_rx_csum,
6969         .set_rx_csum            = tg3_set_rx_csum,
6970         .get_tx_csum            = ethtool_op_get_tx_csum,
6971         .set_tx_csum            = tg3_set_tx_csum,
6972         .get_sg                 = ethtool_op_get_sg,
6973         .set_sg                 = ethtool_op_set_sg,
6974 #if TG3_TSO_SUPPORT != 0
6975         .get_tso                = ethtool_op_get_tso,
6976         .set_tso                = tg3_set_tso,
6977 #endif
6978         .get_strings            = tg3_get_strings,
6979         .get_stats_count        = tg3_get_stats_count,
6980         .get_ethtool_stats      = tg3_get_ethtool_stats,
6981 };
6982
6983 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
6984 {
6985         u32 cursize, val;
6986
6987         tp->nvram_size = EEPROM_CHIP_SIZE;
6988
6989         if (tg3_nvram_read(tp, 0, &val) != 0)
6990                 return;
6991
6992         if (swab32(val) != TG3_EEPROM_MAGIC)
6993                 return;
6994
6995         /*
6996          * Size the chip by reading offsets at increasing powers of two.
6997          * When we encounter our validation signature, we know the addressing
6998          * has wrapped around, and thus have our chip size.
6999          */
7000         cursize = 0x800;
7001
7002         while (cursize < tp->nvram_size) {
7003                 if (tg3_nvram_read(tp, cursize, &val) != 0)
7004                         return;
7005
7006                 if (swab32(val) == TG3_EEPROM_MAGIC)
7007                         break;
7008
7009                 cursize <<= 1;
7010         }
7011
7012         tp->nvram_size = cursize;
7013 }
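
     /* Illustration of the sizing probe above, assuming a hypothetical
      * 4 kB part: the read at 0x800 returns ordinary data, but the read at
      * 0x1000 wraps around to offset 0 and returns the magic signature
      * again, so the loop stops and nvram_size becomes 0x1000.
      */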
7014                 
7015 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7016 {
7017         u32 val;
7018
7019         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7020                 if (val != 0) {
7021                         tp->nvram_size = (val >> 16) * 1024;
7022                         return;
7023                 }
7024         }
7025         tp->nvram_size = 0x20000;
7026 }
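
     /* The upper 16 bits of the word at NVRAM offset 0xf0 give the device
      * size in kilobytes; e.g. a value of 0x0080xxxx yields
      * 0x80 * 1024 = 0x20000 bytes (128 kB), which also happens to be the
      * fallback used when the word reads back as zero or the read fails.
      */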
7027
7028 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7029 {
7030         u32 nvcfg1;
7031
7032         nvcfg1 = tr32(NVRAM_CFG1);
7033         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7034                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7035         }
7036         else {
7037                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7038                 tw32(NVRAM_CFG1, nvcfg1);
7039         }
7040
7041         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7042             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7043                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7044                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7045                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7046                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7047                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7048                                 break;
7049                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7050                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7051                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7052                                 break;
7053                         case FLASH_VENDOR_ATMEL_EEPROM:
7054                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7055                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7056                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7057                                 break;
7058                         case FLASH_VENDOR_ST:
7059                                 tp->nvram_jedecnum = JEDEC_ST;
7060                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7061                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7062                                 break;
7063                         case FLASH_VENDOR_SAIFUN:
7064                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7065                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7066                                 break;
7067                         case FLASH_VENDOR_SST_SMALL:
7068                         case FLASH_VENDOR_SST_LARGE:
7069                                 tp->nvram_jedecnum = JEDEC_SST;
7070                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7071                                 break;
7072                 }
7073         }
7074         else {
7075                 tp->nvram_jedecnum = JEDEC_ATMEL;
7076                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7077                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7078         }
7079 }
7080
7081 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7082 static void __devinit tg3_nvram_init(struct tg3 *tp)
7083 {
7084         int j;
7085
7086         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7087                 return;
7088
7089         tw32_f(GRC_EEPROM_ADDR,
7090              (EEPROM_ADDR_FSM_RESET |
7091               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7092                EEPROM_ADDR_CLKPERD_SHIFT)));
7093
7094         /* XXX schedule_timeout() ... */
7095         for (j = 0; j < 100; j++)
7096                 udelay(10);
7097
7098         /* Enable seeprom accesses. */
7099         tw32_f(GRC_LOCAL_CTRL,
7100              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7101         udelay(100);
7102
7103         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7104             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7105                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7106
7107                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7108                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7109                         u32 nvaccess = tr32(NVRAM_ACCESS);
7110
7111                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7112                 }
7113
7114                 tg3_get_nvram_info(tp);
7115                 tg3_get_nvram_size(tp);
7116
7117                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7118                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7119                         u32 nvaccess = tr32(NVRAM_ACCESS);
7120
7121                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7122                 }
7123
7124         } else {
7125                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7126
7127                 tg3_get_eeprom_size(tp);
7128         }
7129 }
7130
7131 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7132                                         u32 offset, u32 *val)
7133 {
7134         u32 tmp;
7135         int i;
7136
7137         if (offset > EEPROM_ADDR_ADDR_MASK ||
7138             (offset % 4) != 0)
7139                 return -EINVAL;
7140
7141         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7142                                         EEPROM_ADDR_DEVID_MASK |
7143                                         EEPROM_ADDR_READ);
7144         tw32(GRC_EEPROM_ADDR,
7145              tmp |
7146              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7147              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7148               EEPROM_ADDR_ADDR_MASK) |
7149              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7150
7151         for (i = 0; i < 10000; i++) {
7152                 tmp = tr32(GRC_EEPROM_ADDR);
7153
7154                 if (tmp & EEPROM_ADDR_COMPLETE)
7155                         break;
7156                 udelay(100);
7157         }
7158         if (!(tmp & EEPROM_ADDR_COMPLETE))
7159                 return -EBUSY;
7160
7161         *val = tr32(GRC_EEPROM_DATA);
7162         return 0;
7163 }
7164
7165 #define NVRAM_CMD_TIMEOUT 10000
7166
7167 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7168 {
7169         int i;
7170
7171         tw32(NVRAM_CMD, nvram_cmd);
7172         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7173                 udelay(10);
7174                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7175                         udelay(10);
7176                         break;
7177                 }
7178         }
7179         if (i == NVRAM_CMD_TIMEOUT) {
7180                 return -EBUSY;
7181         }
7182         return 0;
7183 }
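
     /* tg3_nvram_exec_cmd() polls NVRAM_CMD_DONE in 10 us steps for up to
      * NVRAM_CMD_TIMEOUT (10000) iterations, i.e. roughly 100 ms worst
      * case before giving up with -EBUSY.
      */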
7184
7185 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7186 {
7187         int ret;
7188
7189         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7190                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7191                 return -EINVAL;
7192         }
7193
7194         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7195                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7196
7197         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7198                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7199                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7200
7201                 offset = ((offset / tp->nvram_pagesize) <<
7202                           ATMEL_AT45DB0X1B_PAGE_POS) +
7203                         (offset % tp->nvram_pagesize);
7204         }
7205
7206         if (offset > NVRAM_ADDR_MSK)
7207                 return -EINVAL;
7208
7209         tg3_nvram_lock(tp);
7210
7211         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7212             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7213                 u32 nvaccess = tr32(NVRAM_ACCESS);
7214
7215                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7216         }
7217
7218         tw32(NVRAM_ADDR, offset);
7219         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7220                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7221
7222         if (ret == 0)
7223                 *val = swab32(tr32(NVRAM_RDDATA));
7224
7225         tg3_nvram_unlock(tp);
7226
7227         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7228             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7229                 u32 nvaccess = tr32(NVRAM_ACCESS);
7230
7231                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7232         }
7233
7234         return ret;
7235 }
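
     /* The Atmel buffered-flash remapping in tg3_nvram_read() converts a
      * linear byte offset into the part's page/byte addressing.  Assuming
      * an AT45DB011B-style device with 264-byte pages and a 9-bit
      * byte-in-page field, offset 1000 would map to page 3, byte 208,
      * i.e. (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208.
      */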
7236
7237 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7238                                     u32 offset, u32 len, u8 *buf)
7239 {
7240         int i, j, rc = 0;
7241         u32 val;
7242
7243         for (i = 0; i < len; i += 4) {
7244                 u32 addr, data;
7245
7246                 addr = offset + i;
7247
7248                 memcpy(&data, buf + i, 4);
7249
7250                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7251
7252                 val = tr32(GRC_EEPROM_ADDR);
7253                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7254
7255                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7256                         EEPROM_ADDR_READ);
7257                 tw32(GRC_EEPROM_ADDR, val |
7258                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7259                         (addr & EEPROM_ADDR_ADDR_MASK) |
7260                         EEPROM_ADDR_START |
7261                         EEPROM_ADDR_WRITE);
7262                 
7263                 for (j = 0; j < 10000; j++) {
7264                         val = tr32(GRC_EEPROM_ADDR);
7265
7266                         if (val & EEPROM_ADDR_COMPLETE)
7267                                 break;
7268                         udelay(100);
7269                 }
7270                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7271                         rc = -EBUSY;
7272                         break;
7273                 }
7274         }
7275
7276         return rc;
7277 }
7278
7279 /* offset and length are dword aligned */
7280 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7281                 u8 *buf)
7282 {
7283         int ret = 0;
7284         u32 pagesize = tp->nvram_pagesize;
7285         u32 pagemask = pagesize - 1;
7286         u32 nvram_cmd;
7287         u8 *tmp;
7288
7289         tmp = kmalloc(pagesize, GFP_KERNEL);
7290         if (tmp == NULL)
7291                 return -ENOMEM;
7292
7293         while (len) {
7294                 int j;
7295                 u32 phy_addr, page_off, size, nvaccess;
7296
7297                 phy_addr = offset & ~pagemask;
7298         
7299                 for (j = 0; j < pagesize; j += 4) {
7300                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7301                                                 (u32 *) (tmp + j))))
7302                                 break;
7303                 }
7304                 if (ret)
7305                         break;
7306
7307                 page_off = offset & pagemask;
7308                 size = pagesize;
7309                 if (len < size)
7310                         size = len;
7311
7312                 len -= size;
7313
7314                 memcpy(tmp + page_off, buf, size);
7315                 buf += size;
7316                 offset = offset + (pagesize - page_off);
7317
7318                 nvaccess = tr32(NVRAM_ACCESS);
7319                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7320
7321                 /*
7322                  * Before we can erase the flash page, we need
7323                  * to issue a special "write enable" command.
7324                  */
7325                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7326
7327                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7328                         break;
7329
7330                 /* Erase the target page */
7331                 tw32(NVRAM_ADDR, phy_addr);
7332
7333                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7334                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7335
7336                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7337                         break;
7338
7339                 /* Issue another write enable to start the write. */
7340                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7341
7342                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7343                         break;
7344
7345                 for (j = 0; j < pagesize; j += 4) {
7346                         u32 data;
7347
7348                         data = *((u32 *) (tmp + j));
7349                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7350
7351                         tw32(NVRAM_ADDR, phy_addr + j);
7352
7353                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7354                                 NVRAM_CMD_WR;
7355
7356                         if (j == 0)
7357                                 nvram_cmd |= NVRAM_CMD_FIRST;
7358                         else if (j == (pagesize - 4))
7359                                 nvram_cmd |= NVRAM_CMD_LAST;
7360
7361                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7362                                 break;
7363                 }
7364                 if (ret)
7365                         break;
7366         }
7367
7368         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7369         tg3_nvram_exec_cmd(tp, nvram_cmd);
7370
7371         kfree(tmp);
7372
7373         return ret;
7374 }
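
     /* Per-page cycle used above for unbuffered flash: read the whole page
      * into 'tmp', merge in the caller's data, issue a write-enable (WREN),
      * erase the page, issue another WREN, program the page one 32-bit
      * word at a time with FIRST/LAST framing, and finally drop write
      * enable with WRDI once all pages are done.
      */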
7375
7376 /* offset and length are dword aligned */
7377 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7378                 u8 *buf)
7379 {
7380         int i, ret = 0;
7381
7382         for (i = 0; i < len; i += 4, offset += 4) {
7383                 u32 data, page_off, phy_addr, nvram_cmd;
7384
7385                 memcpy(&data, buf + i, 4);
7386                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7387
7388                 page_off = offset % tp->nvram_pagesize;
7389
7390                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7391                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7392
7393                         phy_addr = ((offset / tp->nvram_pagesize) <<
7394                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7395                 }
7396                 else {
7397                         phy_addr = offset;
7398                 }
7399
7400                 tw32(NVRAM_ADDR, phy_addr);
7401
7402                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7403
7404                 if ((page_off == 0) || (i == 0))
7405                         nvram_cmd |= NVRAM_CMD_FIRST;
7406                 else if (page_off == (tp->nvram_pagesize - 4))
7407                         nvram_cmd |= NVRAM_CMD_LAST;
7408
7409                 if (i == (len - 4))
7410                         nvram_cmd |= NVRAM_CMD_LAST;
7411
7412                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7413                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7414
7415                         if ((ret = tg3_nvram_exec_cmd(tp,
7416                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7417                                 NVRAM_CMD_DONE)))
7418
7419                                 break;
7420                 }
7421                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7422                         /* We always do complete word writes to eeprom. */
7423                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7424                 }
7425
7426                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7427                         break;
7428         }
7429         return ret;
7430 }
7431
7432 /* offset and length are dword aligned */
7433 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7434 {
7435         int ret;
7436
7437         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7438                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7439                 return -EINVAL;
7440         }
7441
7442         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7443                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7444                        GRC_LCLCTRL_GPIO_OE1);
7445                 udelay(40);
7446         }
7447
7448         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7449                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7450         }
7451         else {
7452                 u32 grc_mode;
7453
7454                 tg3_nvram_lock(tp);
7455
7456                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7457                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7458                         u32 nvaccess = tr32(NVRAM_ACCESS);
7459
7460                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7461
7462                         tw32(NVRAM_WRITE1, 0x406);
7463                 }
7464
7465                 grc_mode = tr32(GRC_MODE);
7466                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7467
7468                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7469                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7470
7471                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7472                                 buf);
7473                 }
7474                 else {
7475                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7476                                 buf);
7477                 }
7478
7479                 grc_mode = tr32(GRC_MODE);
7480                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7481
7482                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7483                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7484                         u32 nvaccess = tr32(NVRAM_ACCESS);
7485
7486                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7487                 }
7488                 tg3_nvram_unlock(tp);
7489         }
7490
7491         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7492                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7493                        GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1);
7494                 udelay(40);
7495         }
7496
7497         return ret;
7498 }
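
     /* tg3_nvram_write_block() itself is only a dispatcher: chips without
      * TG3_FLAG_NVRAM (5700/5701) go through the GRC EEPROM interface,
      * NVRAM-equipped chips take the buffered or unbuffered flash path,
      * and the whole operation is bracketed by GRC_MODE_NVRAM_WR_ENABLE
      * and, when EEPROM_WRITE_PROT is set, by toggling the write-protect
      * GPIO.
      */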
7499
7500 struct subsys_tbl_ent {
7501         u16 subsys_vendor, subsys_devid;
7502         u32 phy_id;
7503 };
7504
7505 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7506         /* Broadcom boards. */
7507         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7508         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7509         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7510         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7511         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7512         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7513         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7514         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7515         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7516         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7517         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7518
7519         /* 3com boards. */
7520         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7521         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7522         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7523         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7524         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7525
7526         /* DELL boards. */
7527         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7528         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7529         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7530         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7531
7532         /* Compaq boards. */
7533         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7534         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7535         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7536         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7537         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7538
7539         /* IBM boards. */
7540         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7541 };
7542
7543 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7544 {
7545         int i;
7546
7547         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7548                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7549                      tp->pdev->subsystem_vendor) &&
7550                     (subsys_id_to_phy_id[i].subsys_devid ==
7551                      tp->pdev->subsystem_device))
7552                         return &subsys_id_to_phy_id[i];
7553         }
7554         return NULL;
7555 }
7556
7557 static int __devinit tg3_phy_probe(struct tg3 *tp)
7558 {
7559         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
7560         u32 hw_phy_id, hw_phy_id_masked;
7561         u32 val;
7562         int eeprom_signature_found, eeprom_phy_serdes, err;
7563
7564         tp->phy_id = PHY_ID_INVALID;
7565         eeprom_phy_id = PHY_ID_INVALID;
7566         eeprom_phy_serdes = 0;
7567         eeprom_signature_found = 0;
7568         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7569         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7570                 u32 nic_cfg, led_cfg;
7571                 u32 nic_phy_id, ver, cfg2 = 0;
7572
7573                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7574                 tp->nic_sram_data_cfg = nic_cfg;
7575
7576                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7577                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7578                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7579                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7580                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7581                     (ver > 0) && (ver < 0x100))
7582                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7583
7584                 eeprom_signature_found = 1;
7585
7586                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7587                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7588                         eeprom_phy_serdes = 1;
7589
7590                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7591                 if (nic_phy_id != 0) {
7592                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7593                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7594
7595                         eeprom_phy_id  = (id1 >> 16) << 10;
7596                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7597                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7598                 } else
7599                         eeprom_phy_id = 0;
7600
7601                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7602                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7603                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7604                                     SHASTA_EXT_LED_MODE_MASK);
7605                 } else
7606                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7607
7608                 switch (led_cfg) {
7609                 default:
7610                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7611                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7612                         break;
7613
7614                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7615                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7616                         break;
7617
7618                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7619                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7620                         break;
7621
7622                 case SHASTA_EXT_LED_SHARED:
7623                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7624                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7625                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7626                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7627                                                  LED_CTRL_MODE_PHY_2);
7628                         break;
7629
7630                 case SHASTA_EXT_LED_MAC:
7631                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7632                         break;
7633
7634                 case SHASTA_EXT_LED_COMBO:
7635                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7636                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7637                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7638                                                  LED_CTRL_MODE_PHY_2);
7639                         break;
7640
7641                 }
7642
7643                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7644                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7645                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7646                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7647
7648                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7649                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7650                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7651                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7652
7653                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7654                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7655                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7656                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7657                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7658                 }
7659                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7660                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7661
7662                 if (cfg2 & (1 << 17))
7663                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7664
7665                 /* serdes signal pre-emphasis in register 0x590 is set
7666                  * by the bootcode if bit 18 is set. */
7667                 if (cfg2 & (1 << 18))
7668                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7669         }
7670
7671         /* Reading the PHY ID register can conflict with ASF
7672          * firmware access to the PHY hardware.
7673          */
7674         err = 0;
7675         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7676                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7677         } else {
7678                 /* Now read the physical PHY_ID from the chip and verify
7679                  * that it is sane.  If it doesn't look good, we fall back
7680                  * to the PHY_ID found in the eeprom area and, failing
7681                  * that, to the hard-coded subsystem ID table.
7682                  */
7683                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7684                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7685
7686                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7687                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7688                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7689
7690                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7691         }
7692
7693         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7694                 tp->phy_id = hw_phy_id;
7695                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7696                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7697         } else {
7698                 if (eeprom_signature_found) {
7699                         tp->phy_id = eeprom_phy_id;
7700                         if (eeprom_phy_serdes)
7701                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7702                 } else {
7703                         struct subsys_tbl_ent *p;
7704
7705                         /* No eeprom signature?  Try the hardcoded
7706                          * subsys device table.
7707                          */
7708                         p = lookup_by_subsys(tp);
7709                         if (!p)
7710                                 return -ENODEV;
7711
7712                         tp->phy_id = p->phy_id;
7713                         if (!tp->phy_id ||
7714                             tp->phy_id == PHY_ID_BCM8002)
7715                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7716                 }
7717         }
7718
7719         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7720             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7721                 u32 bmsr, adv_reg, tg3_ctrl;
7722
7723                 tg3_readphy(tp, MII_BMSR, &bmsr);
7724                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
7725                     (bmsr & BMSR_LSTATUS))
7726                         goto skip_phy_reset;
7727                     
7728                 err = tg3_phy_reset(tp);
7729                 if (err)
7730                         return err;
7731
7732                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7733                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7734                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7735                 tg3_ctrl = 0;
7736                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7737                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7738                                     MII_TG3_CTRL_ADV_1000_FULL);
7739                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7740                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7741                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7742                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7743                 }
7744
7745                 if (!tg3_copper_is_advertising_all(tp)) {
7746                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7747
7748                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7749                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7750
7751                         tg3_writephy(tp, MII_BMCR,
7752                                      BMCR_ANENABLE | BMCR_ANRESTART);
7753                 }
7754                 tg3_phy_set_wirespeed(tp);
7755
7756                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7757                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7758                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7759         }
7760
7761 skip_phy_reset:
7762         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7763                 err = tg3_init_5401phy_dsp(tp);
7764                 if (err)
7765                         return err;
7766         }
7767
7768         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7769                 err = tg3_init_5401phy_dsp(tp);
7770         }
7771
7772         if (!eeprom_signature_found)
7773                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7774
7775         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7776                 tp->link_config.advertising =
7777                         (ADVERTISED_1000baseT_Half |
7778                          ADVERTISED_1000baseT_Full |
7779                          ADVERTISED_Autoneg |
7780                          ADVERTISED_FIBRE);
7781         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7782                 tp->link_config.advertising &=
7783                         ~(ADVERTISED_1000baseT_Half |
7784                           ADVERTISED_1000baseT_Full);
7785
7786         return err;
7787 }
7788
7789 static void __devinit tg3_read_partno(struct tg3 *tp)
7790 {
7791         unsigned char vpd_data[256];
7792         int i;
7793
7794         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7795                 /* Sun decided not to put the necessary bits in the
7796                  * NVRAM of their onboard tg3 parts :(
7797                  */
7798                 strcpy(tp->board_part_number, "Sun 570X");
7799                 return;
7800         }
7801
7802         for (i = 0; i < 256; i += 4) {
7803                 u32 tmp;
7804
7805                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7806                         goto out_not_found;
7807
7808                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7809                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7810                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7811                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7812         }
7813
7814         /* Now parse and find the part number. */
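             /* Rough sketch of the VPD layout this loop assumes (standard
              * PCI VPD large-resource tags):
              *
              *   0x82  Identifier string tag    (skipped)
              *   0x91  VPD-W tag                (skipped)
              *   0x90  VPD-R tag, holding two-character keywords such as
              *         "PN", each followed by a one-byte data length
              *
              * Tag lengths are 16-bit little-endian, hence the
              * vpd_data[i + 1] + (vpd_data[i + 2] << 8) arithmetic below.
              */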
7815         for (i = 0; i < 256; ) {
7816                 unsigned char val = vpd_data[i];
7817                 int block_end;
7818
7819                 if (val == 0x82 || val == 0x91) {
7820                         i = (i + 3 +
7821                              (vpd_data[i + 1] +
7822                               (vpd_data[i + 2] << 8)));
7823                         continue;
7824                 }
7825
7826                 if (val != 0x90)
7827                         goto out_not_found;
7828
7829                 block_end = (i + 3 +
7830                              (vpd_data[i + 1] +
7831                               (vpd_data[i + 2] << 8)));
7832                 i += 3;
7833                 while (i < block_end) {
7834                         if (vpd_data[i + 0] == 'P' &&
7835                             vpd_data[i + 1] == 'N') {
7836                                 int partno_len = vpd_data[i + 2];
7837
7838                                 if (partno_len > 24)
7839                                         goto out_not_found;
7840
7841                                 memcpy(tp->board_part_number,
7842                                        &vpd_data[i + 3],
7843                                        partno_len);
7844
7845                                 /* Success. */
7846                                 return;
7847                         }
7848                 }
7849
7850                 /* Part number not found. */
7851                 goto out_not_found;
7852         }
7853
7854 out_not_found:
7855         strcpy(tp->board_part_number, "none");
7856 }
7857
7858 #ifdef CONFIG_SPARC64
7859 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7860 {
7861         struct pci_dev *pdev = tp->pdev;
7862         struct pcidev_cookie *pcp = pdev->sysdata;
7863
7864         if (pcp != NULL) {
7865                 int node = pcp->prom_node;
7866                 u32 venid;
7867                 int err;
7868
7869                 err = prom_getproperty(node, "subsystem-vendor-id",
7870                                        (char *) &venid, sizeof(venid));
7871                 if (err == 0 || err == -1)
7872                         return 0;
7873                 if (venid == PCI_VENDOR_ID_SUN)
7874                         return 1;
7875         }
7876         return 0;
7877 }
7878 #endif
7879
7880 static int __devinit tg3_get_invariants(struct tg3 *tp)
7881 {
7882         static struct pci_device_id write_reorder_chipsets[] = {
7883                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7884                              PCI_DEVICE_ID_INTEL_82801AA_8) },
7885                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7886                              PCI_DEVICE_ID_INTEL_82801AB_8) },
7887                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7888                              PCI_DEVICE_ID_INTEL_82801BA_11) },
7889                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7890                              PCI_DEVICE_ID_INTEL_82801BA_6) },
7891                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
7892                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
7893                 { },
7894         };
7895         u32 misc_ctrl_reg;
7896         u32 cacheline_sz_reg;
7897         u32 pci_state_reg, grc_misc_cfg;
7898         u32 val;
7899         u16 pci_cmd;
7900         int err;
7901
7902 #ifdef CONFIG_SPARC64
7903         if (tg3_is_sun_570X(tp))
7904                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7905 #endif
7906
7907         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7908          * reordering to the mailbox registers done by the host
7909          * controller can cause major troubles.  We read back from
7910          * every mailbox register write to force the writes to be
7911          * posted to the chip in order.
7912          */
7913         if (pci_dev_present(write_reorder_chipsets))
7914                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7915
7916         /* Force memory write invalidate off.  If we leave it on,
7917          * then on 5700_BX chips we have to enable a workaround.
7918          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7919          * to match the cacheline size.  The Broadcom driver has this
7920          * workaround but turns MWI off all the time and so never uses
7921          * it.  This seems to suggest that the workaround is insufficient.
7922          */
7923         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7924         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7925         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7926
7927         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7928          * has the register indirect write enable bit set before
7929          * we try to access any of the MMIO registers.  It is also
7930          * critical that the PCI-X hw workaround situation is decided
7931          * before that as well.
7932          */
7933         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7934                               &misc_ctrl_reg);
7935
7936         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7937                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7938
7939         /* Initialize misc host control in PCI block. */
7940         tp->misc_host_ctrl |= (misc_ctrl_reg &
7941                                MISC_HOST_CTRL_CHIPREV);
7942         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7943                                tp->misc_host_ctrl);
7944
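             /* TG3PCI_CACHELINESZ appears to be the standard PCI config
              * dword at offset 0x0c; its bytes are, low to high: cache line
              * size, latency timer, header type, BIST.  That is the layout
              * unpacked below.
              */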
7945         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7946                               &cacheline_sz_reg);
7947
7948         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7949         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7950         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7951         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7952
7953         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
7954             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
7955             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752))
7956                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
7957
7958         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7959             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7960                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
7961
7962         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7963                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7964
7965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7966             tp->pci_lat_timer < 64) {
7967                 tp->pci_lat_timer = 64;
7968
7969                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7970                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7971                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7972                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7973
7974                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7975                                        cacheline_sz_reg);
7976         }
7977
7978         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7979                               &pci_state_reg);
7980
7981         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7982                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7983
7984                 /* If this is a 5700 BX chipset, and we are in PCI-X
7985                  * mode, enable register write workaround.
7986                  *
7987                  * The workaround is to use indirect register accesses
7988                  * for all chip writes not to mailbox registers.
7989                  */
7990                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7991                         u32 pm_reg;
7992                         u16 pci_cmd;
7993
7994                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7995
7996                         /* The chip can have its power management PCI config
7997                          * space registers clobbered due to this bug.
7998                          * So explicitly force the chip into D0 here.
7999                          */
8000                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8001                                               &pm_reg);
8002                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
8003                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
8004                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8005                                                pm_reg);
8006
8007                         /* Also, force SERR#/PERR# in PCI command. */
8008                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8009                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
8010                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8011                 }
8012         }
8013
8014         /* Back to back register writes can cause problems on this chip,
8015          * the workaround is to read back all reg writes except those to
8016          * mailbox regs.  See tg3_write_indirect_reg32().
8017          *
8018          * PCI Express 5750_A0 rev chips need this workaround too.
8019          */
8020         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8021             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8022              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8023                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8024
8025         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8026                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8027         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8028                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8029
8030         /* Chip-specific fixup from Broadcom driver */
8031         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8032             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8033                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8034                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8035         }
8036
8037         /* Force the chip into D0. */
8038         err = tg3_set_power_state(tp, 0);
8039         if (err) {
8040                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8041                        pci_name(tp->pdev));
8042                 return err;
8043         }
8044
8045         /* 5700 B0 chips do not support checksumming correctly due
8046          * to hardware bugs.
8047          */
8048         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8049                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8050
8051         /* Pseudo-header checksum is done by hardware logic and not
8052          * the offload processors, so make the chip do the pseudo-
8053          * header checksums on receive.  For transmit it is more
8054          * convenient to do the pseudo-header checksum in software
8055          * as Linux does that on transmit for us in all cases.
8056          */
8057         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8058         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8059
8060         /* Derive initial jumbo mode from MTU assigned in
8061          * ether_setup() via the alloc_etherdev() call
8062          */
8063         if (tp->dev->mtu > ETH_DATA_LEN)
8064                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8065
8066         /* Determine WakeOnLan speed to use. */
8067         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8068             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8069             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8070             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8071                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8072         } else {
8073                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8074         }
8075
8076         /* A few boards don't want Ethernet@WireSpeed phy feature */
8077         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8078             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8079              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8080              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8081                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8082
8083         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8084             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8085                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8086         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8087                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8088
8089         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8090             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8091             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8092                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8093
8094         /* Only 5701 and later support tagged irq status mode.
8095          * Also, 5788 chips cannot use tagged irq status.
8096          *
8097          * However, since we are using NAPI, avoid tagged irq status
8098          * because the interrupt condition is more difficult to
8099          * fully clear in that mode.
8100          */
8101         tp->coalesce_mode = 0;
8102
8103         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8104             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8105                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8106
8107         /* Initialize MAC MI mode, polling disabled. */
8108         tw32_f(MAC_MI_MODE, tp->mi_mode);
8109         udelay(80);
8110
8111         /* Initialize data/descriptor byte/word swapping. */
8112         val = tr32(GRC_MODE);
8113         val &= GRC_MODE_HOST_STACKUP;
8114         tw32(GRC_MODE, val | tp->grc_mode);
8115
8116         tg3_switch_clocks(tp);
8117
8118         /* Clear this out for sanity. */
8119         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8120
8121         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8122                               &pci_state_reg);
8123         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8124             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8125                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8126
8127                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8128                     chiprevid == CHIPREV_ID_5701_B0 ||
8129                     chiprevid == CHIPREV_ID_5701_B2 ||
8130                     chiprevid == CHIPREV_ID_5701_B5) {
8131                         void __iomem *sram_base;
8132
8133                         /* Write some dummy words into the SRAM status block
8134                          * area, see if it reads back correctly.  If the return
8135                          * value is bad, force enable the PCIX workaround.
8136                          */
8137                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8138
8139                         writel(0x00000000, sram_base);
8140                         writel(0x00000000, sram_base + 4);
8141                         writel(0xffffffff, sram_base + 4);
8142                         if (readl(sram_base) != 0x00000000)
8143                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8144                 }
8145         }
8146
8147         udelay(50);
8148         tg3_nvram_init(tp);
8149
8150         grc_misc_cfg = tr32(GRC_MISC_CFG);
8151         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8152
8153         /* Broadcom's driver says that CIOBE multisplit has a bug */
8154 #if 0
8155         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8156             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8157                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8158                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8159         }
8160 #endif
8161         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8162             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8163              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8164                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8165
8166         /* these are limited to 10/100 only */
8167         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8168              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8169             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8170              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8171              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8172               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8173               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8174             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8175              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8176               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8177                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8178
8179         err = tg3_phy_probe(tp);
8180         if (err) {
8181                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8182                        pci_name(tp->pdev), err);
8183                 /* ... but do not return immediately ... */
8184         }
8185
8186         tg3_read_partno(tp);
8187
8188         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8189                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8190         } else {
8191                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8192                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8193                 else
8194                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8195         }
8196
8197         /* 5700 {AX,BX} chips have a broken status block link
8198          * change bit implementation, so we must use the
8199          * status register in those cases.
8200          */
8201         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8202                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8203         else
8204                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8205
8206         /* The led_ctrl is set during tg3_phy_probe; here we might
8207          * have to force the link status polling mechanism based
8208          * upon subsystem IDs.
8209          */
8210         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8211             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8212                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8213                                   TG3_FLAG_USE_LINKCHG_REG);
8214         }
8215
8216         /* For all SERDES we poll the MAC status register. */
8217         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8218                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8219         else
8220                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8221
8222         /* 5700 BX chips need to have their TX producer index mailboxes
8223          * written twice to workaround a bug.
8224          */
8225         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8226                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8227         else
8228                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8229
8230         /* It seems all chips can get confused if TX buffers
8231          * straddle the 4GB address boundary in some cases.
8232          */
8233         tp->dev->hard_start_xmit = tg3_start_xmit;
8234
8235         tp->rx_offset = 2;
8236         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8237             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8238                 tp->rx_offset = 0;
8239
8240         /* By default, disable wake-on-lan.  User can change this
8241          * using ETHTOOL_SWOL.
8242          */
8243         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8244
8245         return err;
8246 }
8247
8248 #ifdef CONFIG_SPARC64
8249 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8250 {
8251         struct net_device *dev = tp->dev;
8252         struct pci_dev *pdev = tp->pdev;
8253         struct pcidev_cookie *pcp = pdev->sysdata;
8254
8255         if (pcp != NULL) {
8256                 int node = pcp->prom_node;
8257
8258                 if (prom_getproplen(node, "local-mac-address") == 6) {
8259                         prom_getproperty(node, "local-mac-address",
8260                                          dev->dev_addr, 6);
8261                         return 0;
8262                 }
8263         }
8264         return -ENODEV;
8265 }
8266
8267 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8268 {
8269         struct net_device *dev = tp->dev;
8270
8271         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8272         return 0;
8273 }
8274 #endif
8275
8276 static int __devinit tg3_get_device_address(struct tg3 *tp)
8277 {
8278         struct net_device *dev = tp->dev;
8279         u32 hi, lo, mac_offset;
8280
8281 #ifdef CONFIG_SPARC64
8282         if (!tg3_get_macaddr_sparc(tp))
8283                 return 0;
8284 #endif
8285
8286         mac_offset = 0x7c;
8287         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8288             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8289                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8290                         mac_offset = 0xcc;
8291                 if (tg3_nvram_lock(tp))
8292                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8293                 else
8294                         tg3_nvram_unlock(tp);
8295         }
8296
8297         /* First try to get it from MAC address mailbox. */
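             /* The 0x484b value checked below is ASCII "HK", apparently
              * used by the bootcode as a "MAC address valid" signature.
              */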
8298         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
8299         if ((hi >> 16) == 0x484b) {
8300                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8301                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8302
8303                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8304                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8305                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8306                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8307                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8308         }
8309         /* Next, try NVRAM. */
8310         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8311                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8312                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8313                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8314                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8315                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8316                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8317                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8318                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8319         }
8320         /* Finally just fetch it out of the MAC control regs. */
8321         else {
8322                 hi = tr32(MAC_ADDR_0_HIGH);
8323                 lo = tr32(MAC_ADDR_0_LOW);
8324
8325                 dev->dev_addr[5] = lo & 0xff;
8326                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8327                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8328                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8329                 dev->dev_addr[1] = hi & 0xff;
8330                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8331         }
8332
8333         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8334 #ifdef CONFIG_SPARC64
8335                 if (!tg3_get_default_macaddr_sparc(tp))
8336                         return 0;
8337 #endif
8338                 return -EINVAL;
8339         }
8340         return 0;
8341 }
8342
8343 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8344 {
8345         struct tg3_internal_buffer_desc test_desc;
8346         u32 sram_dma_descs;
8347         int i, ret;
8348
8349         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8350
8351         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8352         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8353         tw32(RDMAC_STATUS, 0);
8354         tw32(WDMAC_STATUS, 0);
8355
8356         tw32(BUFMGR_MODE, 0);
8357         tw32(FTQ_RESET, 0);
8358
8359         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8360         test_desc.addr_lo = buf_dma & 0xffffffff;
8361         test_desc.nic_mbuf = 0x00002100;
8362         test_desc.len = size;
8363
8364         /*
8365          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
8366          * the *second* time the tg3 driver was getting loaded after an
8367          * initial scan.
8368          *
8369          * Broadcom tells me:
8370          *   ...the DMA engine is connected to the GRC block and a DMA
8371          *   reset may affect the GRC block in some unpredictable way...
8372          *   The behavior of resets to individual blocks has not been tested.
8373          *
8374          * Broadcom noted the GRC reset will also reset all sub-components.
8375          */
8376         if (to_device) {
8377                 test_desc.cqid_sqid = (13 << 8) | 2;
8378
8379                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8380                 udelay(40);
8381         } else {
8382                 test_desc.cqid_sqid = (16 << 8) | 7;
8383
8384                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8385                 udelay(40);
8386         }
8387         test_desc.flags = 0x00000005;
8388
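             /* Copy the test descriptor into NIC SRAM one 32-bit word at a
              * time via the PCI memory window registers (window base
              * address, then data), then point the window back at 0.
              */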
8389         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8390                 u32 val;
8391
8392                 val = *(((u32 *)&test_desc) + i);
8393                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8394                                        sram_dma_descs + (i * sizeof(u32)));
8395                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8396         }
8397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8398
8399         if (to_device) {
8400                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8401         } else {
8402                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8403         }
8404
8405         ret = -ENODEV;
8406         for (i = 0; i < 40; i++) {
8407                 u32 val;
8408
8409                 if (to_device)
8410                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8411                 else
8412                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8413                 if ((val & 0xffff) == sram_dma_descs) {
8414                         ret = 0;
8415                         break;
8416                 }
8417
8418                 udelay(100);
8419         }
8420
8421         return ret;
8422 }
8423
8424 #define TEST_BUFFER_SIZE        0x400
8425
8426 static int __devinit tg3_test_dma(struct tg3 *tp)
8427 {
8428         dma_addr_t buf_dma;
8429         u32 *buf;
8430         int ret;
8431
8432         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8433         if (!buf) {
8434                 ret = -ENOMEM;
8435                 goto out_nofree;
8436         }
8437
8438         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8439                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8440
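             /* On non-x86 hosts, derive the DMA write boundary from the PCI
              * cache line size, presumably so chip writes line up with host
              * cache lines; on x86 the default chosen above is kept.
              */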
8441 #ifndef CONFIG_X86
8442         {
8443                 u8 byte;
8444                 int cacheline_size;
8445                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8446
8447                 if (byte == 0)
8448                         cacheline_size = 1024;
8449                 else
8450                         cacheline_size = (int) byte * 4;
8451
8452                 switch (cacheline_size) {
8453                 case 16:
8454                 case 32:
8455                 case 64:
8456                 case 128:
8457                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8458                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8459                                 tp->dma_rwctrl |=
8460                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8461                                 break;
8462                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8463                                 tp->dma_rwctrl &=
8464                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8465                                 tp->dma_rwctrl |=
8466                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8467                                 break;
8468                         }
8469                         /* fallthrough */
8470                 case 256:
8471                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8472                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8473                                 tp->dma_rwctrl |=
8474                                         DMA_RWCTRL_WRITE_BNDRY_256;
8475                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8476                                 tp->dma_rwctrl |=
8477                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8478                 }
8479         }
8480 #endif
8481
8482         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8483                 /* DMA read watermark not used on PCIE */
8484                 tp->dma_rwctrl |= 0x00180000;
8485         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8486                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8487                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8488                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8489                         tp->dma_rwctrl |= 0x003f0000;
8490                 else
8491                         tp->dma_rwctrl |= 0x003f000f;
8492         } else {
8493                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8494                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8495                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8496
8497                         if (ccval == 0x6 || ccval == 0x7)
8498                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8499
8500                         /* Set bit 23 to re-enable the PCI-X hw bug fix */
8501                         tp->dma_rwctrl |= 0x009f0000;
8502                 } else {
8503                         tp->dma_rwctrl |= 0x001b000f;
8504                 }
8505         }
8506
8507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8508             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8509                 tp->dma_rwctrl &= 0xfffffff0;
8510
8511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8512             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8513                 /* Remove this if it causes problems for some boards. */
8514                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8515
8516                 /* On 5700/5701 chips, we need to set this bit.
8517                  * Otherwise the chip will issue cacheline transactions
8518                  * to streamable DMA memory with not all the byte
8519                  * enables turned on.  This is an error on several
8520                  * RISC PCI controllers, in particular sparc64.
8521                  *
8522                  * On 5703/5704 chips, this bit has been reassigned
8523                  * a different meaning.  In particular, it is used
8524                  * on those chips to enable a PCI-X workaround.
8525                  */
8526                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8527         }
8528
8529         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8530
8531 #if 0
8532         /* Unneeded, already done by tg3_get_invariants.  */
8533         tg3_switch_clocks(tp);
8534 #endif
8535
8536         ret = 0;
8537         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8538             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8539                 goto out;
8540
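             /* DMA test loop: fill the buffer with a known pattern, DMA it
              * to the chip, DMA it back, and verify.  On corruption, retry
              * once with a 16-byte write boundary before giving up.
              */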
8541         while (1) {
8542                 u32 *p = buf, i;
8543
8544                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8545                         p[i] = i;
8546
8547                 /* Send the buffer to the chip. */
8548                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8549                 if (ret) {
8550                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
8551                         break;
8552                 }
8553
8554 #if 0
8555                 /* validate data reached card RAM correctly. */
8556                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8557                         u32 val;
8558                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8559                         if (le32_to_cpu(val) != p[i]) {
8560                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8561                                 /* ret = -ENODEV here? */
8562                         }
8563                         p[i] = 0;
8564                 }
8565 #endif
8566                 /* Now read it back. */
8567                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8568                 if (ret) {
8569                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8570
8571                         break;
8572                 }
8573
8574                 /* Verify it. */
8575                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8576                         if (p[i] == i)
8577                                 continue;
8578
8579                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8580                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8581                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8582                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8583                                 break;
8584                         } else {
8585                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8586                                 ret = -ENODEV;
8587                                 goto out;
8588                         }
8589                 }
8590
8591                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8592                         /* Success. */
8593                         ret = 0;
8594                         break;
8595                 }
8596         }
8597
8598 out:
8599         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8600 out_nofree:
8601         return ret;
8602 }
8603
8604 static void __devinit tg3_init_link_config(struct tg3 *tp)
8605 {
8606         tp->link_config.advertising =
8607                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8608                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8609                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8610                  ADVERTISED_Autoneg | ADVERTISED_MII);
8611         tp->link_config.speed = SPEED_INVALID;
8612         tp->link_config.duplex = DUPLEX_INVALID;
8613         tp->link_config.autoneg = AUTONEG_ENABLE;
8614         netif_carrier_off(tp->dev);
8615         tp->link_config.active_speed = SPEED_INVALID;
8616         tp->link_config.active_duplex = DUPLEX_INVALID;
8617         tp->link_config.phy_is_low_power = 0;
8618         tp->link_config.orig_speed = SPEED_INVALID;
8619         tp->link_config.orig_duplex = DUPLEX_INVALID;
8620         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8621 }
8622
8623 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8624 {
8625         tp->bufmgr_config.mbuf_read_dma_low_water =
8626                 DEFAULT_MB_RDMA_LOW_WATER;
8627         tp->bufmgr_config.mbuf_mac_rx_low_water =
8628                 DEFAULT_MB_MACRX_LOW_WATER;
8629         tp->bufmgr_config.mbuf_high_water =
8630                 DEFAULT_MB_HIGH_WATER;
8631
8632         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8633                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8634         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8635                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8636         tp->bufmgr_config.mbuf_high_water_jumbo =
8637                 DEFAULT_MB_HIGH_WATER_JUMBO;
8638
8639         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8640         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8641 }
8642
8643 static char * __devinit tg3_phy_string(struct tg3 *tp)
8644 {
8645         switch (tp->phy_id & PHY_ID_MASK) {
8646         case PHY_ID_BCM5400:    return "5400";
8647         case PHY_ID_BCM5401:    return "5401";
8648         case PHY_ID_BCM5411:    return "5411";
8649         case PHY_ID_BCM5701:    return "5701";
8650         case PHY_ID_BCM5703:    return "5703";
8651         case PHY_ID_BCM5704:    return "5704";
8652         case PHY_ID_BCM5705:    return "5705";
8653         case PHY_ID_BCM5750:    return "5750";
8654         case PHY_ID_BCM8002:    return "8002/serdes";
8655         case 0:                 return "serdes";
8656         default:                return "unknown";
8657         }
8658 }
8659
8660 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8661 {
8662         struct pci_dev *peer;
8663         unsigned int func, devnr = tp->pdev->devfn & ~7;
8664
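             /* The 5704 is a dual-MAC device; the peer is the other PCI
              * function in the same slot (devfn with the function bits
              * masked off).
              */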
8665         for (func = 0; func < 8; func++) {
8666                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8667                 if (peer && peer != tp->pdev)
8668                         break;
8669                 pci_dev_put(peer);
8670         }
8671         if (!peer || peer == tp->pdev)
8672                 BUG();
8673
8674         /*
8675          * We don't need to keep the refcount elevated; there's no way
8676          * to remove one half of this device without removing the other
8677          */
8678         pci_dev_put(peer);
8679
8680         return peer;
8681 }
8682
8683 static int __devinit tg3_init_one(struct pci_dev *pdev,
8684                                   const struct pci_device_id *ent)
8685 {
8686         static int tg3_version_printed = 0;
8687         unsigned long tg3reg_base, tg3reg_len;
8688         struct net_device *dev;
8689         struct tg3 *tp;
8690         int i, err, pci_using_dac, pm_cap;
8691
8692         if (tg3_version_printed++ == 0)
8693                 printk(KERN_INFO "%s", version);
8694
8695         err = pci_enable_device(pdev);
8696         if (err) {
8697                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8698                        "aborting.\n");
8699                 return err;
8700         }
8701
8702         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8703                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8704                        "base address, aborting.\n");
8705                 err = -ENODEV;
8706                 goto err_out_disable_pdev;
8707         }
8708
8709         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8710         if (err) {
8711                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8712                        "aborting.\n");
8713                 goto err_out_disable_pdev;
8714         }
8715
8716         pci_set_master(pdev);
8717
8718         /* Find power-management capability. */
8719         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8720         if (pm_cap == 0) {
8721                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8722                        "aborting.\n");
8723                 err = -EIO;
8724                 goto err_out_free_res;
8725         }
8726
8727         /* Configure DMA attributes. */
8728         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8729         if (!err) {
8730                 pci_using_dac = 1;
8731                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8732                 if (err < 0) {
8733                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8734                                "for consistent allocations\n");
8735                         goto err_out_free_res;
8736                 }
8737         } else {
8738                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8739                 if (err) {
8740                         printk(KERN_ERR PFX "No usable DMA configuration, "
8741                                "aborting.\n");
8742                         goto err_out_free_res;
8743                 }
8744                 pci_using_dac = 0;
8745         }
8746
8747         tg3reg_base = pci_resource_start(pdev, 0);
8748         tg3reg_len = pci_resource_len(pdev, 0);
8749
8750         dev = alloc_etherdev(sizeof(*tp));
8751         if (!dev) {
8752                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8753                 err = -ENOMEM;
8754                 goto err_out_free_res;
8755         }
8756
8757         SET_MODULE_OWNER(dev);
8758         SET_NETDEV_DEV(dev, &pdev->dev);
8759
8760         if (pci_using_dac)
8761                 dev->features |= NETIF_F_HIGHDMA;
8762         dev->features |= NETIF_F_LLTX;
8763 #if TG3_VLAN_TAG_USED
8764         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8765         dev->vlan_rx_register = tg3_vlan_rx_register;
8766         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8767 #endif
8768
8769         tp = netdev_priv(dev);
8770         tp->pdev = pdev;
8771         tp->dev = dev;
8772         tp->pm_cap = pm_cap;
8773         tp->mac_mode = TG3_DEF_MAC_MODE;
8774         tp->rx_mode = TG3_DEF_RX_MODE;
8775         tp->tx_mode = TG3_DEF_TX_MODE;
8776         tp->mi_mode = MAC_MI_MODE_BASE;
8777         if (tg3_debug > 0)
8778                 tp->msg_enable = tg3_debug;
8779         else
8780                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8781
8782         /* The word/byte swap controls here control register access byte
8783          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8784          * setting below.
8785          */
8786         tp->misc_host_ctrl =
8787                 MISC_HOST_CTRL_MASK_PCI_INT |
8788                 MISC_HOST_CTRL_WORD_SWAP |
8789                 MISC_HOST_CTRL_INDIR_ACCESS |
8790                 MISC_HOST_CTRL_PCISTATE_RW;
8791
8792         /* The NONFRM (non-frame) byte/word swap controls take effect
8793          * on descriptor entries, anything which isn't packet data.
8794          *
8795          * The StrongARM chips on the board (one for tx, one for rx)
8796          * are running in big-endian mode.
8797          */
8798         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8799                         GRC_MODE_WSWAP_NONFRM_DATA);
8800 #ifdef __BIG_ENDIAN
8801         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8802 #endif
8803         spin_lock_init(&tp->lock);
8804         spin_lock_init(&tp->tx_lock);
8805         spin_lock_init(&tp->indirect_lock);
8806         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8807
8808         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8809         if (!tp->regs) {
8810                 printk(KERN_ERR PFX "Cannot map device registers, "
8811                        "aborting.\n");
8812                 err = -ENOMEM;
8813                 goto err_out_free_dev;
8814         }
8815
8816         tg3_init_link_config(tp);
8817
8818         tg3_init_bufmgr_config(tp);
8819
8820         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8821         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8822         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8823
8824         dev->open = tg3_open;
8825         dev->stop = tg3_close;
8826         dev->get_stats = tg3_get_stats;
8827         dev->set_multicast_list = tg3_set_rx_mode;
8828         dev->set_mac_address = tg3_set_mac_addr;
8829         dev->do_ioctl = tg3_ioctl;
8830         dev->tx_timeout = tg3_tx_timeout;
8831         dev->poll = tg3_poll;
8832         dev->ethtool_ops = &tg3_ethtool_ops;
8833         dev->weight = 64;
8834         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8835         dev->change_mtu = tg3_change_mtu;
8836         dev->irq = pdev->irq;
8837 #ifdef CONFIG_NET_POLL_CONTROLLER
8838         dev->poll_controller = tg3_poll_controller;
8839 #endif
8840
8841         err = tg3_get_invariants(tp);
8842         if (err) {
8843                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8844                        "aborting.\n");
8845                 goto err_out_iounmap;
8846         }
8847
8848         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8849                 tp->bufmgr_config.mbuf_read_dma_low_water =
8850                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8851                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8852                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8853                 tp->bufmgr_config.mbuf_high_water =
8854                         DEFAULT_MB_HIGH_WATER_5705;
8855         }
8856
8857 #if TG3_TSO_SUPPORT != 0
8858         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
8859                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8860         }
8861         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8862             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8863             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8864             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
8865                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8866         } else {
8867                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8868         }
8869
8870         /* TSO is off by default, user can enable using ethtool.  */
8871 #if 0
8872         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8873                 dev->features |= NETIF_F_TSO;
8874 #endif
8875
8876 #endif
8877
8878         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8879             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8880             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8881                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8882                 tp->rx_pending = 63;
8883         }
8884
8885         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8886                 tp->pdev_peer = tg3_find_5704_peer(tp);
8887
8888         err = tg3_get_device_address(tp);
8889         if (err) {
8890                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8891                        "aborting.\n");
8892                 goto err_out_iounmap;
8893         }
8894
8895         /*
8896          * Reset chip in case UNDI or EFI driver did not shut it down.
8897          * The DMA self test will enable WDMAC and we'll see (spurious)
8898          * pending DMA on the PCI bus at that point.
8899          */
8900         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8901             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8902                 pci_save_state(tp->pdev);
8903                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8904                 tg3_halt(tp);
8905         }
8906
8907         err = tg3_test_dma(tp);
8908         if (err) {
8909                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8910                 goto err_out_iounmap;
8911         }
8912
8913         /* Tigon3 can do ipv4 only... and some chips have buggy
8914          * checksumming.
8915          */
8916         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8917                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8918                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8919         } else
8920                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8921
8922         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8923                 dev->features &= ~NETIF_F_HIGHDMA;
8924
8925         /* flow control autonegotiation is default behavior */
8926         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8927
8928         err = register_netdev(dev);
8929         if (err) {
8930                 printk(KERN_ERR PFX "Cannot register net device, "
8931                        "aborting.\n");
8932                 goto err_out_iounmap;
8933         }
8934
8935         pci_set_drvdata(pdev, dev);
8936
8937         /* Now that we have fully setup the chip, save away a snapshot
8938          * of the PCI config space.  We need to restore this after
8939          * GRC_MISC_CFG core clock resets and some resume events.
8940          */
8941         pci_save_state(tp->pdev);
8942
8943         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8944                dev->name,
8945                tp->board_part_number,
8946                tp->pci_chip_rev_id,
8947                tg3_phy_string(tp),
8948                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8949                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8950                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8951                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8952                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8953                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8954
8955         for (i = 0; i < 6; i++)
8956                 printk("%2.2x%c", dev->dev_addr[i],
8957                        i == 5 ? '\n' : ':');
8958
8959         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8960                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8961                "TSOcap[%d] \n",
8962                dev->name,
8963                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8964                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8965                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8966                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8967                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8968                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8969                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8970
8971         return 0;
8972
8973 err_out_iounmap:
8974         iounmap(tp->regs);
8975
8976 err_out_free_dev:
8977         free_netdev(dev);
8978
8979 err_out_free_res:
8980         pci_release_regions(pdev);
8981
8982 err_out_disable_pdev:
8983         pci_disable_device(pdev);
8984         pci_set_drvdata(pdev, NULL);
8985         return err;
8986 }
8987
8988 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8989 {
8990         struct net_device *dev = pci_get_drvdata(pdev);
8991
8992         if (dev) {
8993                 struct tg3 *tp = netdev_priv(dev);
8994
8995                 unregister_netdev(dev);
8996                 iounmap(tp->regs);
8997                 free_netdev(dev);
8998                 pci_release_regions(pdev);
8999                 pci_disable_device(pdev);
9000                 pci_set_drvdata(pdev, NULL);
9001         }
9002 }
9003
9004 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9005 {
9006         struct net_device *dev = pci_get_drvdata(pdev);
9007         struct tg3 *tp = netdev_priv(dev);
9008         int err;
9009
9010         if (!netif_running(dev))
9011                 return 0;
9012
9013         tg3_netif_stop(tp);
9014
9015         del_timer_sync(&tp->timer);
9016
9017         spin_lock_irq(&tp->lock);
9018         spin_lock(&tp->tx_lock);
9019         tg3_disable_ints(tp);
9020         spin_unlock(&tp->tx_lock);
9021         spin_unlock_irq(&tp->lock);
9022
9023         netif_device_detach(dev);
9024
9025         spin_lock_irq(&tp->lock);
9026         spin_lock(&tp->tx_lock);
9027         tg3_halt(tp);
9028         spin_unlock(&tp->tx_lock);
9029         spin_unlock_irq(&tp->lock);
9030
9031         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
9032         if (err) {
9033                 spin_lock_irq(&tp->lock);
9034                 spin_lock(&tp->tx_lock);
9035
9036                 tg3_init_hw(tp);
9037
9038                 tp->timer.expires = jiffies + tp->timer_offset;
9039                 add_timer(&tp->timer);
9040
9041                 netif_device_attach(dev);
9042                 tg3_netif_start(tp);
9043
9044                 spin_unlock(&tp->tx_lock);
9045                 spin_unlock_irq(&tp->lock);
9046         }
9047
9048         return err;
9049 }
9050
9051 static int tg3_resume(struct pci_dev *pdev)
9052 {
9053         struct net_device *dev = pci_get_drvdata(pdev);
9054         struct tg3 *tp = netdev_priv(dev);
9055         int err;
9056
9057         if (!netif_running(dev))
9058                 return 0;
9059
9060         pci_restore_state(tp->pdev);
9061
9062         err = tg3_set_power_state(tp, 0);
9063         if (err)
9064                 return err;
9065
9066         netif_device_attach(dev);
9067
9068         spin_lock_irq(&tp->lock);
9069         spin_lock(&tp->tx_lock);
9070
9071         tg3_init_hw(tp);
9072
9073         tp->timer.expires = jiffies + tp->timer_offset;
9074         add_timer(&tp->timer);
9075
9076         tg3_enable_ints(tp);
9077
9078         tg3_netif_start(tp);
9079
9080         spin_unlock(&tp->tx_lock);
9081         spin_unlock_irq(&tp->lock);
9082
9083         return 0;
9084 }
9085
9086 static struct pci_driver tg3_driver = {
9087         .name           = DRV_MODULE_NAME,
9088         .id_table       = tg3_pci_tbl,
9089         .probe          = tg3_init_one,
9090         .remove         = __devexit_p(tg3_remove_one),
9091         .suspend        = tg3_suspend,
9092         .resume         = tg3_resume
9093 };
9094
9095 static int __init tg3_init(void)
9096 {
9097         return pci_module_init(&tg3_driver);
9098 }
9099
9100 static void __exit tg3_cleanup(void)
9101 {
9102         pci_unregister_driver(&tg3_driver);
9103 }
9104
9105 module_init(tg3_init);
9106 module_exit(tg3_cleanup);