1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.26"
65 #define DRV_MODULE_RELDATE      "April 24, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself;
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { 0, }
232 };
233
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
235
236 static struct {
237         const char string[ETH_GSTRING_LEN];
238 } ethtool_stats_keys[TG3_NUM_STATS] = {
239         { "rx_octets" },
240         { "rx_fragments" },
241         { "rx_ucast_packets" },
242         { "rx_mcast_packets" },
243         { "rx_bcast_packets" },
244         { "rx_fcs_errors" },
245         { "rx_align_errors" },
246         { "rx_xon_pause_rcvd" },
247         { "rx_xoff_pause_rcvd" },
248         { "rx_mac_ctrl_rcvd" },
249         { "rx_xoff_entered" },
250         { "rx_frame_too_long_errors" },
251         { "rx_jabbers" },
252         { "rx_undersize_packets" },
253         { "rx_in_length_errors" },
254         { "rx_out_length_errors" },
255         { "rx_64_or_less_octet_packets" },
256         { "rx_65_to_127_octet_packets" },
257         { "rx_128_to_255_octet_packets" },
258         { "rx_256_to_511_octet_packets" },
259         { "rx_512_to_1023_octet_packets" },
260         { "rx_1024_to_1522_octet_packets" },
261         { "rx_1523_to_2047_octet_packets" },
262         { "rx_2048_to_4095_octet_packets" },
263         { "rx_4096_to_8191_octet_packets" },
264         { "rx_8192_to_9022_octet_packets" },
265
266         { "tx_octets" },
267         { "tx_collisions" },
268
269         { "tx_xon_sent" },
270         { "tx_xoff_sent" },
271         { "tx_flow_control" },
272         { "tx_mac_errors" },
273         { "tx_single_collisions" },
274         { "tx_mult_collisions" },
275         { "tx_deferred" },
276         { "tx_excessive_collisions" },
277         { "tx_late_collisions" },
278         { "tx_collide_2times" },
279         { "tx_collide_3times" },
280         { "tx_collide_4times" },
281         { "tx_collide_5times" },
282         { "tx_collide_6times" },
283         { "tx_collide_7times" },
284         { "tx_collide_8times" },
285         { "tx_collide_9times" },
286         { "tx_collide_10times" },
287         { "tx_collide_11times" },
288         { "tx_collide_12times" },
289         { "tx_collide_13times" },
290         { "tx_collide_14times" },
291         { "tx_collide_15times" },
292         { "tx_ucast_packets" },
293         { "tx_mcast_packets" },
294         { "tx_bcast_packets" },
295         { "tx_carrier_sense_errors" },
296         { "tx_discards" },
297         { "tx_errors" },
298
299         { "dma_writeq_full" },
300         { "dma_write_prioq_full" },
301         { "rxbds_empty" },
302         { "rx_discards" },
303         { "rx_errors" },
304         { "rx_threshold_hit" },
305
306         { "dma_readq_full" },
307         { "dma_read_prioq_full" },
308         { "tx_comp_queue_full" },
309
310         { "ring_set_send_prod_index" },
311         { "ring_status_update" },
312         { "nic_irqs" },
313         { "nic_avoided_irqs" },
314         { "nic_tx_threshold_hit" }
315 };
316
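/* Basic register write helper.  Chips with the PCI-X target hardware
 * bug are written through PCI config space (register base/data pair)
 * under indirect_lock; all other chips use a direct MMIO write, with a
 * read-back on chips that have the 5701 register write bug.
 */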
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318 {
319         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320                 unsigned long flags;
321
322                 spin_lock_irqsave(&tp->indirect_lock, flags);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326         } else {
327                 writel(val, tp->regs + off);
328                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329                         readl(tp->regs + off);
330         }
331 }
332
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334 {
335         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336                 unsigned long flags;
337
338                 spin_lock_irqsave(&tp->indirect_lock, flags);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         } else {
343                 void __iomem *dest = tp->regs + off;
344                 writel(val, dest);
345                 readl(dest);    /* always flush PCI write */
346         }
347 }
348
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350 {
351         void __iomem *mbox = tp->regs + off;
352         writel(val, mbox);
353         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354                 readl(mbox);
355 }
356
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         void __iomem *mbox = tp->regs + off;
360         writel(val, mbox);
361         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362                 writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364                 readl(mbox);
365 }
366
367 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
370
371 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg)               readl(tp->regs + (reg))
376 #define tr16(reg)               readw(tp->regs + (reg))
377 #define tr8(reg)                readb(tp->regs + (reg))
378
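/* tg3_write_mem() and tg3_read_mem() access NIC on-board SRAM through
 * the PCI memory window registers: the window base is programmed, the
 * data register is accessed, and the base is reset to zero again, all
 * under indirect_lock.
 */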
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393 {
394         unsigned long flags;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400         /* Always leave this as zero. */
401         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402         spin_unlock_irqrestore(&tp->indirect_lock, flags);
403 }
404
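/* Mask chip interrupts: set the PCI interrupt mask bit in misc host
 * control, write 1 to the interrupt mailbox, and read the mailbox back
 * to flush the write.
 */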
405 static void tg3_disable_ints(struct tg3 *tp)
406 {
407         tw32(TG3PCI_MISC_HOST_CTRL,
408              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
409         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
410         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
411 }
412
413 static inline void tg3_cond_int(struct tg3 *tp)
414 {
415         if (tp->hw_status->status & SD_STATUS_UPDATED)
416                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
417 }
418
419 static void tg3_enable_ints(struct tg3 *tp)
420 {
421         tw32(TG3PCI_MISC_HOST_CTRL,
422              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
424         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
425
426         tg3_cond_int(tp);
427 }
428
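/* Check the host status block for pending work (a link change event,
 * TX completions, or newly received packets) without touching the
 * hardware.
 */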
429 static inline unsigned int tg3_has_work(struct tg3 *tp)
430 {
431         struct tg3_hw_status *sblk = tp->hw_status;
432         unsigned int work_exists = 0;
433
434         /* check for phy events */
435         if (!(tp->tg3_flags &
436               (TG3_FLAG_USE_LINKCHG_REG |
437                TG3_FLAG_POLL_SERDES))) {
438                 if (sblk->status & SD_STATUS_LINK_CHG)
439                         work_exists = 1;
440         }
441         /* check for RX/TX work to do */
442         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
443             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
444                 work_exists = 1;
445
446         return work_exists;
447 }
448
449 /* tg3_restart_ints
450  *  similar to tg3_enable_ints, but it accurately determines whether there
451  *  is new work pending and can return without flushing the PIO write
452  *  which reenables interrupts.
453  */
454 static void tg3_restart_ints(struct tg3 *tp)
455 {
456         tw32(TG3PCI_MISC_HOST_CTRL,
457                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
458         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
459         mmiowb();
460
461         if (tg3_has_work(tp))
462                 tw32(HOSTCC_MODE, tp->coalesce_mode |
463                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
464 }
465
466 static inline void tg3_netif_stop(struct tg3 *tp)
467 {
468         netif_poll_disable(tp->dev);
469         netif_tx_disable(tp->dev);
470 }
471
472 static inline void tg3_netif_start(struct tg3 *tp)
473 {
474         netif_wake_queue(tp->dev);
475         /* NOTE: unconditional netif_wake_queue is only appropriate
476          * so long as all callers are assured to have free tx slots
477          * (such as after tg3_init_hw)
478          */
479         netif_poll_enable(tp->dev);
480         tg3_cond_int(tp);
481 }
482
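/* Switch the core clock back to its normal source.  The current
 * TG3PCI_CLOCK_CTRL value is trimmed and cached in tp->pci_clock_ctrl,
 * and any alternate core clock (CLOCK_CTRL_625_CORE on 5705+ chips,
 * CLOCK_CTRL_44MHZ_CORE on older ones) is stepped off with the
 * required 40 usec delays.
 */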
483 static void tg3_switch_clocks(struct tg3 *tp)
484 {
485         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
486         u32 orig_clock_ctrl;
487
488         orig_clock_ctrl = clock_ctrl;
489         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
490                        CLOCK_CTRL_CLKRUN_OENABLE |
491                        0x1f);
492         tp->pci_clock_ctrl = clock_ctrl;
493
494         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
495                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
496                         tw32_f(TG3PCI_CLOCK_CTRL,
497                                clock_ctrl | CLOCK_CTRL_625_CORE);
498                         udelay(40);
499                 }
500         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
501                 tw32_f(TG3PCI_CLOCK_CTRL,
502                      clock_ctrl |
503                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
504                 udelay(40);
505                 tw32_f(TG3PCI_CLOCK_CTRL,
506                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
507                 udelay(40);
508         }
509         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
510         udelay(40);
511 }
512
513 #define PHY_BUSY_LOOPS  5000
514
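/* Read a PHY register over the MI (MDIO) interface.  Autopolling is
 * temporarily disabled, a read command is issued through MAC_MI_COM,
 * and the busy bit is polled for up to PHY_BUSY_LOOPS iterations.
 * Returns -EBUSY on timeout.  tg3_writephy() below is the write-side
 * counterpart.
 */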
515 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
516 {
517         u32 frame_val;
518         unsigned int loops;
519         int ret;
520
521         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
522                 tw32_f(MAC_MI_MODE,
523                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
524                 udelay(80);
525         }
526
527         *val = 0x0;
528
529         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
530                       MI_COM_PHY_ADDR_MASK);
531         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
532                       MI_COM_REG_ADDR_MASK);
533         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
534
535         tw32_f(MAC_MI_COM, frame_val);
536
537         loops = PHY_BUSY_LOOPS;
538         while (loops != 0) {
539                 udelay(10);
540                 frame_val = tr32(MAC_MI_COM);
541
542                 if ((frame_val & MI_COM_BUSY) == 0) {
543                         udelay(5);
544                         frame_val = tr32(MAC_MI_COM);
545                         break;
546                 }
547                 loops -= 1;
548         }
549
550         ret = -EBUSY;
551         if (loops != 0) {
552                 *val = frame_val & MI_COM_DATA_MASK;
553                 ret = 0;
554         }
555
556         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
557                 tw32_f(MAC_MI_MODE, tp->mi_mode);
558                 udelay(80);
559         }
560
561         return ret;
562 }
563
564 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
565 {
566         u32 frame_val;
567         unsigned int loops;
568         int ret;
569
570         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
571                 tw32_f(MAC_MI_MODE,
572                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
573                 udelay(80);
574         }
575
576         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
577                       MI_COM_PHY_ADDR_MASK);
578         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
579                       MI_COM_REG_ADDR_MASK);
580         frame_val |= (val & MI_COM_DATA_MASK);
581         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
582
583         tw32_f(MAC_MI_COM, frame_val);
584
585         loops = PHY_BUSY_LOOPS;
586         while (loops != 0) {
587                 udelay(10);
588                 frame_val = tr32(MAC_MI_COM);
589                 if ((frame_val & MI_COM_BUSY) == 0) {
590                         udelay(5);
591                         frame_val = tr32(MAC_MI_COM);
592                         break;
593                 }
594                 loops -= 1;
595         }
596
597         ret = -EBUSY;
598         if (loops != 0)
599                 ret = 0;
600
601         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
602                 tw32_f(MAC_MI_MODE, tp->mi_mode);
603                 udelay(80);
604         }
605
606         return ret;
607 }
608
609 static void tg3_phy_set_wirespeed(struct tg3 *tp)
610 {
611         u32 val;
612
613         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
614                 return;
615
616         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
617             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
618                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
619                              (val | (1 << 15) | (1 << 4)));
620 }
621
622 static int tg3_bmcr_reset(struct tg3 *tp)
623 {
624         u32 phy_control;
625         int limit, err;
626
627         /* OK, reset it, and poll the BMCR_RESET bit until it
628          * clears or we time out.
629          */
630         phy_control = BMCR_RESET;
631         err = tg3_writephy(tp, MII_BMCR, phy_control);
632         if (err != 0)
633                 return -EBUSY;
634
635         limit = 5000;
636         while (limit--) {
637                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
638                 if (err != 0)
639                         return -EBUSY;
640
641                 if ((phy_control & BMCR_RESET) == 0) {
642                         udelay(40);
643                         break;
644                 }
645                 udelay(10);
646         }
647         if (limit <= 0)
648                 return -EBUSY;
649
650         return 0;
651 }
652
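/* Poll PHY register 0x16 until bit 0x1000 (presumably the DSP macro
 * busy flag) clears, returning -EBUSY if it does not clear within the
 * loop limit.
 */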
653 static int tg3_wait_macro_done(struct tg3 *tp)
654 {
655         int limit = 100;
656
657         while (limit--) {
658                 u32 tmp32;
659
660                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
661                         if ((tmp32 & 0x1000) == 0)
662                                 break;
663                 }
664         }
665         if (limit <= 0)
666                 return -EBUSY;
667
668         return 0;
669 }
670
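/* Write the DSP test patterns to each of the four channels and read
 * them back.  A macro timeout sets *resetp to request another PHY
 * reset; either a timeout or a pattern mismatch returns -EBUSY.
 */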
671 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
672 {
673         static const u32 test_pat[4][6] = {
674         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
675         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
676         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
677         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
678         };
679         int chan;
680
681         for (chan = 0; chan < 4; chan++) {
682                 int i;
683
684                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
685                              (chan * 0x2000) | 0x0200);
686                 tg3_writephy(tp, 0x16, 0x0002);
687
688                 for (i = 0; i < 6; i++)
689                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
690                                      test_pat[chan][i]);
691
692                 tg3_writephy(tp, 0x16, 0x0202);
693                 if (tg3_wait_macro_done(tp)) {
694                         *resetp = 1;
695                         return -EBUSY;
696                 }
697
698                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
699                              (chan * 0x2000) | 0x0200);
700                 tg3_writephy(tp, 0x16, 0x0082);
701                 if (tg3_wait_macro_done(tp)) {
702                         *resetp = 1;
703                         return -EBUSY;
704                 }
705
706                 tg3_writephy(tp, 0x16, 0x0802);
707                 if (tg3_wait_macro_done(tp)) {
708                         *resetp = 1;
709                         return -EBUSY;
710                 }
711
712                 for (i = 0; i < 6; i += 2) {
713                         u32 low, high;
714
715                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
716                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
717                             tg3_wait_macro_done(tp)) {
718                                 *resetp = 1;
719                                 return -EBUSY;
720                         }
721                         low &= 0x7fff;
722                         high &= 0x000f;
723                         if (low != test_pat[chan][i] ||
724                             high != test_pat[chan][i+1]) {
725                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
726                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
727                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
728
729                                 return -EBUSY;
730                         }
731                 }
732         }
733
734         return 0;
735 }
736
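/* Clear the DSP test pattern registers on all four channels. */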
737 static int tg3_phy_reset_chanpat(struct tg3 *tp)
738 {
739         int chan;
740
741         for (chan = 0; chan < 4; chan++) {
742                 int i;
743
744                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
745                              (chan * 0x2000) | 0x0200);
746                 tg3_writephy(tp, 0x16, 0x0002);
747                 for (i = 0; i < 6; i++)
748                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
749                 tg3_writephy(tp, 0x16, 0x0202);
750                 if (tg3_wait_macro_done(tp))
751                         return -EBUSY;
752         }
753
754         return 0;
755 }
756
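/* PHY reset workaround for 5703/5704/5705: reset the PHY, force
 * 1000 Mb/s full duplex master mode, and retry the DSP test pattern
 * sequence until it verifies, then restore the original PHY settings.
 */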
757 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
758 {
759         u32 reg32, phy9_orig;
760         int retries, do_phy_reset, err;
761
762         retries = 10;
763         do_phy_reset = 1;
764         do {
765                 if (do_phy_reset) {
766                         err = tg3_bmcr_reset(tp);
767                         if (err)
768                                 return err;
769                         do_phy_reset = 0;
770                 }
771
772                 /* Disable transmitter and interrupt.  */
773                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
774                         continue;
775
776                 reg32 |= 0x3000;
777                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
778
779                 /* Set full-duplex, 1000 mbps.  */
780                 tg3_writephy(tp, MII_BMCR,
781                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
782
783                 /* Set to master mode.  */
784                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
785                         continue;
786
787                 tg3_writephy(tp, MII_TG3_CTRL,
788                              (MII_TG3_CTRL_AS_MASTER |
789                               MII_TG3_CTRL_ENABLE_AS_MASTER));
790
791                 /* Enable SM_DSP_CLOCK and 6dB.  */
792                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
793
794                 /* Block the PHY control access.  */
795                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
796                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
797
798                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
799                 if (!err)
800                         break;
801         } while (--retries);
802
803         err = tg3_phy_reset_chanpat(tp);
804         if (err)
805                 return err;
806
807         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
808         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
809
810         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
811         tg3_writephy(tp, 0x16, 0x0000);
812
813         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
814             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
815                 /* Set Extended packet length bit for jumbo frames */
816                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
817         }
818         else {
819                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
820         }
821
822         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
823
824         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
825                 reg32 &= ~0x3000;
826                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
827         } else if (!err)
828                 err = -EBUSY;
829
830         return err;
831 }
832
833 /* Reset the tigon3 PHY and apply any chip-specific DSP workarounds
834  * needed afterwards.
835  */
836 static int tg3_phy_reset(struct tg3 *tp)
837 {
838         u32 phy_status;
839         int err;
840
841         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
842         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
843         if (err != 0)
844                 return -EBUSY;
845
846         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
847             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
848             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
849                 err = tg3_phy_reset_5703_4_5(tp);
850                 if (err)
851                         return err;
852                 goto out;
853         }
854
855         err = tg3_bmcr_reset(tp);
856         if (err)
857                 return err;
858
859 out:
860         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
861                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
862                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
863                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
864                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
865                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
866                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
867         }
868         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
869                 tg3_writephy(tp, 0x1c, 0x8d68);
870                 tg3_writephy(tp, 0x1c, 0x8d68);
871         }
872         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
873                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
874                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
875                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
876                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
877                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
878                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
879                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
880                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
881         }
882         /* Set the Extended packet length bit (bit 14) on all chips
883          * that support jumbo frames. */
884         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
885                 /* Cannot do read-modify-write on 5401 */
886                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
887         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
888                 u32 phy_reg;
889
890                 /* Set bit 14 with read-modify-write to preserve other bits */
891                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
892                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
893                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
894         }
895
896         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
897          * jumbo frames transmission.
898          */
899         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
900                 u32 phy_reg;
901
902                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
903                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
904                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
905         }
906
907         tg3_phy_set_wirespeed(tp);
908         return 0;
909 }
910
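/* Configure the GPIO pins that control auxiliary (Vaux) power.  On
 * 5704 the two ports share these pins, so the peer device's WOL and
 * init state is consulted before switching.
 */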
911 static void tg3_frob_aux_power(struct tg3 *tp)
912 {
913         struct tg3 *tp_peer = tp;
914
915         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
916                 return;
917
918         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
919                 tp_peer = pci_get_drvdata(tp->pdev_peer);
920                 if (!tp_peer)
921                         BUG();
922         }
923
924
925         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
926             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
927                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
928                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
929                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
930                              (GRC_LCLCTRL_GPIO_OE0 |
931                               GRC_LCLCTRL_GPIO_OE1 |
932                               GRC_LCLCTRL_GPIO_OE2 |
933                               GRC_LCLCTRL_GPIO_OUTPUT0 |
934                               GRC_LCLCTRL_GPIO_OUTPUT1));
935                         udelay(100);
936                 } else {
937                         u32 no_gpio2;
938                         u32 grc_local_ctrl;
939
940                         if (tp_peer != tp &&
941                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
942                                 return;
943
944                         /* On 5753 and variants, GPIO2 cannot be used. */
945                         no_gpio2 = tp->nic_sram_data_cfg &
946                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
947
948                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
949                                          GRC_LCLCTRL_GPIO_OE1 |
950                                          GRC_LCLCTRL_GPIO_OE2 |
951                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
952                                          GRC_LCLCTRL_GPIO_OUTPUT2;
953                         if (no_gpio2) {
954                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
955                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
956                         }
957                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
958                                                 grc_local_ctrl);
959                         udelay(100);
960
961                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
962
963                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
964                                                 grc_local_ctrl);
965                         udelay(100);
966
967                         if (!no_gpio2) {
968                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
969                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
970                                        grc_local_ctrl);
971                                 udelay(100);
972                         }
973                 }
974         } else {
975                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
976                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
977                         if (tp_peer != tp &&
978                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
979                                 return;
980
981                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
982                              (GRC_LCLCTRL_GPIO_OE1 |
983                               GRC_LCLCTRL_GPIO_OUTPUT1));
984                         udelay(100);
985
986                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
987                              (GRC_LCLCTRL_GPIO_OE1));
988                         udelay(100);
989
990                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
991                              (GRC_LCLCTRL_GPIO_OE1 |
992                               GRC_LCLCTRL_GPIO_OUTPUT1));
993                         udelay(100);
994                 }
995         }
996 }
997
998 static int tg3_setup_phy(struct tg3 *, int);
999
1000 #define RESET_KIND_SHUTDOWN     0
1001 #define RESET_KIND_INIT         1
1002 #define RESET_KIND_SUSPEND      2
1003
1004 static void tg3_write_sig_post_reset(struct tg3 *, int);
1005 static int tg3_halt_cpu(struct tg3 *, u32);
1006
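/* Move the device to the requested PCI power state (D0-D3).  For the
 * low-power states this sets up the MAC for WOL if enabled, switches
 * the clocks down, frobs the auxiliary power GPIOs and finally writes
 * the new state to the PCI PM control register.
 */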
1007 static int tg3_set_power_state(struct tg3 *tp, int state)
1008 {
1009         u32 misc_host_ctrl;
1010         u16 power_control, power_caps;
1011         int pm = tp->pm_cap;
1012
1013         /* Make sure register accesses (indirect or otherwise)
1014          * will function correctly.
1015          */
1016         pci_write_config_dword(tp->pdev,
1017                                TG3PCI_MISC_HOST_CTRL,
1018                                tp->misc_host_ctrl);
1019
1020         pci_read_config_word(tp->pdev,
1021                              pm + PCI_PM_CTRL,
1022                              &power_control);
1023         power_control |= PCI_PM_CTRL_PME_STATUS;
1024         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1025         switch (state) {
1026         case 0:
1027                 power_control |= 0;
1028                 pci_write_config_word(tp->pdev,
1029                                       pm + PCI_PM_CTRL,
1030                                       power_control);
1031                 udelay(100);    /* Delay after power state change */
1032
1033                 /* Switch out of Vaux if it is not a LOM */
1034                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1035                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1036                         udelay(100);
1037                 }
1038
1039                 return 0;
1040
1041         case 1:
1042                 power_control |= 1;
1043                 break;
1044
1045         case 2:
1046                 power_control |= 2;
1047                 break;
1048
1049         case 3:
1050                 power_control |= 3;
1051                 break;
1052
1053         default:
1054                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1055                        "requested.\n",
1056                        tp->dev->name, state);
1057                 return -EINVAL;
1058         }
1059
1060         power_control |= PCI_PM_CTRL_PME_ENABLE;
1061
1062         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1063         tw32(TG3PCI_MISC_HOST_CTRL,
1064              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1065
1066         if (tp->link_config.phy_is_low_power == 0) {
1067                 tp->link_config.phy_is_low_power = 1;
1068                 tp->link_config.orig_speed = tp->link_config.speed;
1069                 tp->link_config.orig_duplex = tp->link_config.duplex;
1070                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1071         }
1072
1073         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1074                 tp->link_config.speed = SPEED_10;
1075                 tp->link_config.duplex = DUPLEX_HALF;
1076                 tp->link_config.autoneg = AUTONEG_ENABLE;
1077                 tg3_setup_phy(tp, 0);
1078         }
1079
1080         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1081
1082         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1083                 u32 mac_mode;
1084
1085                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1086                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1087                         udelay(40);
1088
1089                         mac_mode = MAC_MODE_PORT_MODE_MII;
1090
1091                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1092                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1093                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1094                 } else {
1095                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1096                 }
1097
1098                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1099                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1100
1101                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1102                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1103                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1104
1105                 tw32_f(MAC_MODE, mac_mode);
1106                 udelay(100);
1107
1108                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1109                 udelay(10);
1110         }
1111
1112         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1113             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1114              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1115                 u32 base_val;
1116
1117                 base_val = tp->pci_clock_ctrl;
1118                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1119                              CLOCK_CTRL_TXCLK_DISABLE);
1120
1121                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1122                      CLOCK_CTRL_ALTCLK |
1123                      CLOCK_CTRL_PWRDOWN_PLL133);
1124                 udelay(40);
1125         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1126                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1127                 u32 newbits1, newbits2;
1128
1129                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1130                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1131                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1132                                     CLOCK_CTRL_TXCLK_DISABLE |
1133                                     CLOCK_CTRL_ALTCLK);
1134                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1135                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1136                         newbits1 = CLOCK_CTRL_625_CORE;
1137                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1138                 } else {
1139                         newbits1 = CLOCK_CTRL_ALTCLK;
1140                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1141                 }
1142
1143                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1144                 udelay(40);
1145
1146                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1147                 udelay(40);
1148
1149                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1150                         u32 newbits3;
1151
1152                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1153                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1154                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1155                                             CLOCK_CTRL_TXCLK_DISABLE |
1156                                             CLOCK_CTRL_44MHZ_CORE);
1157                         } else {
1158                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1159                         }
1160
1161                         tw32_f(TG3PCI_CLOCK_CTRL,
1162                                          tp->pci_clock_ctrl | newbits3);
1163                         udelay(40);
1164                 }
1165         }
1166
1167         tg3_frob_aux_power(tp);
1168
1169         /* Workaround for unstable PLL clock */
1170         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1171             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1172                 u32 val = tr32(0x7d00);
1173
1174                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1175                 tw32(0x7d00, val);
1176                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1177                         tg3_halt_cpu(tp, RX_CPU_BASE);
1178         }
1179
1180         /* Finally, set the new power state. */
1181         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1182         udelay(100);    /* Delay after power state change */
1183
1184         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1185
1186         return 0;
1187 }
1188
1189 static void tg3_link_report(struct tg3 *tp)
1190 {
1191         if (!netif_carrier_ok(tp->dev)) {
1192                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1193         } else {
1194                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1195                        tp->dev->name,
1196                        (tp->link_config.active_speed == SPEED_1000 ?
1197                         1000 :
1198                         (tp->link_config.active_speed == SPEED_100 ?
1199                          100 : 10)),
1200                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1201                         "full" : "half"));
1202
1203                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1204                        "%s for RX.\n",
1205                        tp->dev->name,
1206                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1207                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1208         }
1209 }
1210
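/* Resolve TX/RX pause settings from the local and remote autoneg
 * advertisements (when pause autonegotiation is enabled) and update
 * the MAC RX/TX mode registers if the result changed.
 */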
1211 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1212 {
1213         u32 new_tg3_flags = 0;
1214         u32 old_rx_mode = tp->rx_mode;
1215         u32 old_tx_mode = tp->tx_mode;
1216
1217         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1218                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1219                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1220                                 if (remote_adv & LPA_PAUSE_CAP)
1221                                         new_tg3_flags |=
1222                                                 (TG3_FLAG_RX_PAUSE |
1223                                                 TG3_FLAG_TX_PAUSE);
1224                                 else if (remote_adv & LPA_PAUSE_ASYM)
1225                                         new_tg3_flags |=
1226                                                 (TG3_FLAG_RX_PAUSE);
1227                         } else {
1228                                 if (remote_adv & LPA_PAUSE_CAP)
1229                                         new_tg3_flags |=
1230                                                 (TG3_FLAG_RX_PAUSE |
1231                                                 TG3_FLAG_TX_PAUSE);
1232                         }
1233                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1234                         if ((remote_adv & LPA_PAUSE_CAP) &&
1235                         (remote_adv & LPA_PAUSE_ASYM))
1236                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1237                 }
1238
1239                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1240                 tp->tg3_flags |= new_tg3_flags;
1241         } else {
1242                 new_tg3_flags = tp->tg3_flags;
1243         }
1244
1245         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1246                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1247         else
1248                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1249
1250         if (old_rx_mode != tp->rx_mode) {
1251                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1252         }
1253
1254         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1255                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1256         else
1257                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1258
1259         if (old_tx_mode != tp->tx_mode) {
1260                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1261         }
1262 }
1263
1264 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1265 {
1266         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1267         case MII_TG3_AUX_STAT_10HALF:
1268                 *speed = SPEED_10;
1269                 *duplex = DUPLEX_HALF;
1270                 break;
1271
1272         case MII_TG3_AUX_STAT_10FULL:
1273                 *speed = SPEED_10;
1274                 *duplex = DUPLEX_FULL;
1275                 break;
1276
1277         case MII_TG3_AUX_STAT_100HALF:
1278                 *speed = SPEED_100;
1279                 *duplex = DUPLEX_HALF;
1280                 break;
1281
1282         case MII_TG3_AUX_STAT_100FULL:
1283                 *speed = SPEED_100;
1284                 *duplex = DUPLEX_FULL;
1285                 break;
1286
1287         case MII_TG3_AUX_STAT_1000HALF:
1288                 *speed = SPEED_1000;
1289                 *duplex = DUPLEX_HALF;
1290                 break;
1291
1292         case MII_TG3_AUX_STAT_1000FULL:
1293                 *speed = SPEED_1000;
1294                 *duplex = DUPLEX_FULL;
1295                 break;
1296
1297         default:
1298                 *speed = SPEED_INVALID;
1299                 *duplex = DUPLEX_INVALID;
1300                 break;
1301         }
1302 }
1303
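/* Program the copper PHY advertisement registers and (re)start
 * autonegotiation, or force a fixed speed/duplex when autoneg is
 * disabled.  In low-power mode the advertisement is restricted to
 * 10 Mb/s, plus 100 Mb/s when WOL at 100 Mb/s is required.
 */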
1304 static void tg3_phy_copper_begin(struct tg3 *tp)
1305 {
1306         u32 new_adv;
1307         int i;
1308
1309         if (tp->link_config.phy_is_low_power) {
1310                 /* Entering low power mode.  Disable gigabit and
1311                  * 100baseT advertisements.
1312                  */
1313                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1314
1315                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1316                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1317                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1318                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1319
1320                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1321         } else if (tp->link_config.speed == SPEED_INVALID) {
1322                 tp->link_config.advertising =
1323                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1324                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1325                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1326                          ADVERTISED_Autoneg | ADVERTISED_MII);
1327
1328                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1329                         tp->link_config.advertising &=
1330                                 ~(ADVERTISED_1000baseT_Half |
1331                                   ADVERTISED_1000baseT_Full);
1332
1333                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1334                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1335                         new_adv |= ADVERTISE_10HALF;
1336                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1337                         new_adv |= ADVERTISE_10FULL;
1338                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1339                         new_adv |= ADVERTISE_100HALF;
1340                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1341                         new_adv |= ADVERTISE_100FULL;
1342                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1343
1344                 if (tp->link_config.advertising &
1345                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1346                         new_adv = 0;
1347                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1348                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1349                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1350                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1351                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1352                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1353                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1354                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1355                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1356                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1357                 } else {
1358                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1359                 }
1360         } else {
1361                 /* Asking for a specific link mode. */
1362                 if (tp->link_config.speed == SPEED_1000) {
1363                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1364                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1365
1366                         if (tp->link_config.duplex == DUPLEX_FULL)
1367                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1368                         else
1369                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1370                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1371                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1372                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1373                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1374                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1375                 } else {
1376                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1377
1378                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1379                         if (tp->link_config.speed == SPEED_100) {
1380                                 if (tp->link_config.duplex == DUPLEX_FULL)
1381                                         new_adv |= ADVERTISE_100FULL;
1382                                 else
1383                                         new_adv |= ADVERTISE_100HALF;
1384                         } else {
1385                                 if (tp->link_config.duplex == DUPLEX_FULL)
1386                                         new_adv |= ADVERTISE_10FULL;
1387                                 else
1388                                         new_adv |= ADVERTISE_10HALF;
1389                         }
1390                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1391                 }
1392         }
1393
1394         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1395             tp->link_config.speed != SPEED_INVALID) {
1396                 u32 bmcr, orig_bmcr;
1397
1398                 tp->link_config.active_speed = tp->link_config.speed;
1399                 tp->link_config.active_duplex = tp->link_config.duplex;
1400
1401                 bmcr = 0;
1402                 switch (tp->link_config.speed) {
1403                 default:
1404                 case SPEED_10:
1405                         break;
1406
1407                 case SPEED_100:
1408                         bmcr |= BMCR_SPEED100;
1409                         break;
1410
1411                 case SPEED_1000:
1412                         bmcr |= TG3_BMCR_SPEED1000;
1413                         break;
1414                 }
1415
1416                 if (tp->link_config.duplex == DUPLEX_FULL)
1417                         bmcr |= BMCR_FULLDPLX;
1418
1419                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1420                     (bmcr != orig_bmcr)) {
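                             /* Take the link down first: put the PHY in loopback,
                              * wait for BMSR link status to clear, then program the
                              * requested forced speed/duplex below.
                              */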
1421                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1422                         for (i = 0; i < 1500; i++) {
1423                                 u32 tmp;
1424
1425                                 udelay(10);
1426                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1427                                     tg3_readphy(tp, MII_BMSR, &tmp))
1428                                         continue;
1429                                 if (!(tmp & BMSR_LSTATUS)) {
1430                                         udelay(40);
1431                                         break;
1432                                 }
1433                         }
1434                         tg3_writephy(tp, MII_BMCR, bmcr);
1435                         udelay(40);
1436                 }
1437         } else {
1438                 tg3_writephy(tp, MII_BMCR,
1439                              BMCR_ANENABLE | BMCR_ANRESTART);
1440         }
1441 }
1442
1443 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1444 {
1445         int err;
1446
1447         /* Turn off tap power management. */
1448         /* Set Extended packet length bit */
1449         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1450
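             /* Each DSP register below is programmed by first writing its
              * address to MII_TG3_DSP_ADDRESS and then its value to
              * MII_TG3_DSP_RW_PORT.
              */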
1451         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1452         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1453
1454         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1455         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1456
1457         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1458         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1459
1460         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1461         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1462
1463         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1464         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1465
1466         udelay(40);
1467
1468         return err;
1469 }
1470
1471 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1472 {
1473         u32 adv_reg, all_mask;
1474
1475         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1476                 return 0;
1477
1478         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1479                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1480         if ((adv_reg & all_mask) != all_mask)
1481                 return 0;
1482         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1483                 u32 tg3_ctrl;
1484
1485                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1486                         return 0;
1487
1488                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1489                             MII_TG3_CTRL_ADV_1000_FULL);
1490                 if ((tg3_ctrl & all_mask) != all_mask)
1491                         return 0;
1492         }
1493         return 1;
1494 }
1495
1496 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1497 {
1498         int current_link_up;
1499         u32 bmsr, dummy;
1500         u16 current_speed;
1501         u8 current_duplex;
1502         int i, err;
1503
1504         tw32(MAC_EVENT, 0);
1505
1506         tw32_f(MAC_STATUS,
1507              (MAC_STATUS_SYNC_CHANGED |
1508               MAC_STATUS_CFG_CHANGED |
1509               MAC_STATUS_MI_COMPLETION |
1510               MAC_STATUS_LNKSTATE_CHANGED));
1511         udelay(40);
1512
1513         tp->mi_mode = MAC_MI_MODE_BASE;
1514         tw32_f(MAC_MI_MODE, tp->mi_mode);
1515         udelay(80);
1516
1517         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1518
1519         /* Some third-party PHYs need to be reset on link going
1520          * down.
1521          */
1522         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1523              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1524              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1525             netif_carrier_ok(tp->dev)) {
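                     /* BMSR link status is latched low, so read it twice to
                      * get the current link state.
                      */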
1526                 tg3_readphy(tp, MII_BMSR, &bmsr);
1527                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1528                     !(bmsr & BMSR_LSTATUS))
1529                         force_reset = 1;
1530         }
1531         if (force_reset)
1532                 tg3_phy_reset(tp);
1533
1534         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1535                 tg3_readphy(tp, MII_BMSR, &bmsr);
1536                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1537                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1538                         bmsr = 0;
1539
1540                 if (!(bmsr & BMSR_LSTATUS)) {
1541                         err = tg3_init_5401phy_dsp(tp);
1542                         if (err)
1543                                 return err;
1544
1545                         tg3_readphy(tp, MII_BMSR, &bmsr);
1546                         for (i = 0; i < 1000; i++) {
1547                                 udelay(10);
1548                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1549                                     (bmsr & BMSR_LSTATUS)) {
1550                                         udelay(40);
1551                                         break;
1552                                 }
1553                         }
1554
1555                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1556                             !(bmsr & BMSR_LSTATUS) &&
1557                             tp->link_config.active_speed == SPEED_1000) {
1558                                 err = tg3_phy_reset(tp);
1559                                 if (!err)
1560                                         err = tg3_init_5401phy_dsp(tp);
1561                                 if (err)
1562                                         return err;
1563                         }
1564                 }
1565         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1566                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1567                 /* 5701 {A0,B0} CRC bug workaround */
1568                 tg3_writephy(tp, 0x15, 0x0a75);
1569                 tg3_writephy(tp, 0x1c, 0x8c68);
1570                 tg3_writephy(tp, 0x1c, 0x8d68);
1571                 tg3_writephy(tp, 0x1c, 0x8c68);
1572         }
1573
1574         /* Clear pending interrupts... */
1575         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1576         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1577
1578         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1579                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1580         else
1581                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1582
1583         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1584             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1585                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1586                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1587                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1588                 else
1589                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1590         }
1591
1592         current_link_up = 0;
1593         current_speed = SPEED_INVALID;
1594         current_duplex = DUPLEX_INVALID;
1595
1596         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1597                 u32 val;
1598
1599                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1600                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1601                 if (!(val & (1 << 10))) {
1602                         val |= (1 << 10);
1603                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1604                         goto relink;
1605                 }
1606         }
1607
1608         bmsr = 0;
1609         for (i = 0; i < 100; i++) {
1610                 tg3_readphy(tp, MII_BMSR, &bmsr);
1611                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1612                     (bmsr & BMSR_LSTATUS))
1613                         break;
1614                 udelay(40);
1615         }
1616
1617         if (bmsr & BMSR_LSTATUS) {
1618                 u32 aux_stat, bmcr;
1619
1620                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1621                 for (i = 0; i < 2000; i++) {
1622                         udelay(10);
1623                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1624                             aux_stat)
1625                                 break;
1626                 }
1627
1628                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1629                                              &current_speed,
1630                                              &current_duplex);
1631
1632                 bmcr = 0;
1633                 for (i = 0; i < 200; i++) {
1634                         tg3_readphy(tp, MII_BMCR, &bmcr);
1635                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1636                                 continue;
1637                         if (bmcr && bmcr != 0x7fff)
1638                                 break;
1639                         udelay(10);
1640                 }
1641
1642                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1643                         if (bmcr & BMCR_ANENABLE) {
1644                                 current_link_up = 1;
1645
1646                                 /* Force autoneg restart if we are exiting
1647                                  * low power mode.
1648                                  */
1649                                 if (!tg3_copper_is_advertising_all(tp))
1650                                         current_link_up = 0;
1651                         } else {
1652                                 current_link_up = 0;
1653                         }
1654                 } else {
1655                         if (!(bmcr & BMCR_ANENABLE) &&
1656                             tp->link_config.speed == current_speed &&
1657                             tp->link_config.duplex == current_duplex) {
1658                                 current_link_up = 1;
1659                         } else {
1660                                 current_link_up = 0;
1661                         }
1662                 }
1663
1664                 tp->link_config.active_speed = current_speed;
1665                 tp->link_config.active_duplex = current_duplex;
1666         }
1667
1668         if (current_link_up == 1 &&
1669             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1670             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1671                 u32 local_adv, remote_adv;
1672
1673                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1674                         local_adv = 0;
1675                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1676
1677                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1678                         remote_adv = 0;
1679
1680                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1681
1682                 /* If we are not advertising full pause capability,
1683                  * something is wrong.  Bring the link down and reconfigure.
1684                  */
1685                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1686                         current_link_up = 0;
1687                 } else {
1688                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1689                 }
1690         }
1691 relink:
1692         if (current_link_up == 0) {
1693                 u32 tmp;
1694
1695                 tg3_phy_copper_begin(tp);
1696
1697                 tg3_readphy(tp, MII_BMSR, &tmp);
1698                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1699                     (tmp & BMSR_LSTATUS))
1700                         current_link_up = 1;
1701         }
1702
1703         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1704         if (current_link_up == 1) {
1705                 if (tp->link_config.active_speed == SPEED_100 ||
1706                     tp->link_config.active_speed == SPEED_10)
1707                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1708                 else
1709                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1710         } else
1711                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1712
1713         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1714         if (tp->link_config.active_duplex == DUPLEX_HALF)
1715                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1716
1717         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1718         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1719                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1720                     (current_link_up == 1 &&
1721                      tp->link_config.active_speed == SPEED_10))
1722                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1723         } else {
1724                 if (current_link_up == 1)
1725                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1726         }
1727
1728         /* ??? Without this setting Netgear GA302T PHY does not
1729          * ??? send/receive packets...
1730          */
1731         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1732             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1733                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1734                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1735                 udelay(80);
1736         }
1737
1738         tw32_f(MAC_MODE, tp->mac_mode);
1739         udelay(40);
1740
1741         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1742                 /* Polled via timer. */
1743                 tw32_f(MAC_EVENT, 0);
1744         } else {
1745                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1746         }
1747         udelay(40);
1748
1749         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1750             current_link_up == 1 &&
1751             tp->link_config.active_speed == SPEED_1000 &&
1752             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1753              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1754                 udelay(120);
1755                 tw32_f(MAC_STATUS,
1756                      (MAC_STATUS_SYNC_CHANGED |
1757                       MAC_STATUS_CFG_CHANGED));
1758                 udelay(40);
1759                 tg3_write_mem(tp,
1760                               NIC_SRAM_FIRMWARE_MBOX,
1761                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1762         }
1763
1764         if (current_link_up != netif_carrier_ok(tp->dev)) {
1765                 if (current_link_up)
1766                         netif_carrier_on(tp->dev);
1767                 else
1768                         netif_carrier_off(tp->dev);
1769                 tg3_link_report(tp);
1770         }
1771
1772         return 0;
1773 }
1774
1775 struct tg3_fiber_aneginfo {
1776         int state;
1777 #define ANEG_STATE_UNKNOWN              0
1778 #define ANEG_STATE_AN_ENABLE            1
1779 #define ANEG_STATE_RESTART_INIT         2
1780 #define ANEG_STATE_RESTART              3
1781 #define ANEG_STATE_DISABLE_LINK_OK      4
1782 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1783 #define ANEG_STATE_ABILITY_DETECT       6
1784 #define ANEG_STATE_ACK_DETECT_INIT      7
1785 #define ANEG_STATE_ACK_DETECT           8
1786 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1787 #define ANEG_STATE_COMPLETE_ACK         10
1788 #define ANEG_STATE_IDLE_DETECT_INIT     11
1789 #define ANEG_STATE_IDLE_DETECT          12
1790 #define ANEG_STATE_LINK_OK              13
1791 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1792 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1793
1794         u32 flags;
1795 #define MR_AN_ENABLE            0x00000001
1796 #define MR_RESTART_AN           0x00000002
1797 #define MR_AN_COMPLETE          0x00000004
1798 #define MR_PAGE_RX              0x00000008
1799 #define MR_NP_LOADED            0x00000010
1800 #define MR_TOGGLE_TX            0x00000020
1801 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1802 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1803 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1804 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1805 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1806 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1807 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1808 #define MR_TOGGLE_RX            0x00002000
1809 #define MR_NP_RX                0x00004000
1810
1811 #define MR_LINK_OK              0x80000000
1812
1813         unsigned long link_time, cur_time;
1814
1815         u32 ability_match_cfg;
1816         int ability_match_count;
1817
1818         char ability_match, idle_match, ack_match;
1819
1820         u32 txconfig, rxconfig;
1821 #define ANEG_CFG_NP             0x00000080
1822 #define ANEG_CFG_ACK            0x00000040
1823 #define ANEG_CFG_RF2            0x00000020
1824 #define ANEG_CFG_RF1            0x00000010
1825 #define ANEG_CFG_PS2            0x00000001
1826 #define ANEG_CFG_PS1            0x00008000
1827 #define ANEG_CFG_HD             0x00004000
1828 #define ANEG_CFG_FD             0x00002000
1829 #define ANEG_CFG_INVAL          0x00001f06
1830
1831 };
1832 #define ANEG_OK         0
1833 #define ANEG_DONE       1
1834 #define ANEG_TIMER_ENAB 2
1835 #define ANEG_FAILED     -1
1836
1837 #define ANEG_STATE_SETTLE_TIME  10000
1838
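     /* Software state machine for 1000BASE-X auto-negotiation (roughly the
      * IEEE 802.3 Clause 37 arbitration state diagram).  fiber_autoneg()
      * below steps it repeatedly; the return value indicates whether to keep
      * running the timer (ANEG_TIMER_ENAB/ANEG_OK), negotiation is complete
      * (ANEG_DONE), or it has failed (ANEG_FAILED).
      */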
1839 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1840                                    struct tg3_fiber_aneginfo *ap)
1841 {
1842         unsigned long delta;
1843         u32 rx_cfg_reg;
1844         int ret;
1845
1846         if (ap->state == ANEG_STATE_UNKNOWN) {
1847                 ap->rxconfig = 0;
1848                 ap->link_time = 0;
1849                 ap->cur_time = 0;
1850                 ap->ability_match_cfg = 0;
1851                 ap->ability_match_count = 0;
1852                 ap->ability_match = 0;
1853                 ap->idle_match = 0;
1854                 ap->ack_match = 0;
1855         }
1856         ap->cur_time++;
1857
1858         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1859                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1860
1861                 if (rx_cfg_reg != ap->ability_match_cfg) {
1862                         ap->ability_match_cfg = rx_cfg_reg;
1863                         ap->ability_match = 0;
1864                         ap->ability_match_count = 0;
1865                 } else {
1866                         if (++ap->ability_match_count > 1) {
1867                                 ap->ability_match = 1;
1868                                 ap->ability_match_cfg = rx_cfg_reg;
1869                         }
1870                 }
1871                 if (rx_cfg_reg & ANEG_CFG_ACK)
1872                         ap->ack_match = 1;
1873                 else
1874                         ap->ack_match = 0;
1875
1876                 ap->idle_match = 0;
1877         } else {
1878                 ap->idle_match = 1;
1879                 ap->ability_match_cfg = 0;
1880                 ap->ability_match_count = 0;
1881                 ap->ability_match = 0;
1882                 ap->ack_match = 0;
1883
1884                 rx_cfg_reg = 0;
1885         }
1886
1887         ap->rxconfig = rx_cfg_reg;
1888         ret = ANEG_OK;
1889
1890         switch(ap->state) {
1891         case ANEG_STATE_UNKNOWN:
1892                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1893                         ap->state = ANEG_STATE_AN_ENABLE;
1894
1895                 /* fallthru */
1896         case ANEG_STATE_AN_ENABLE:
1897                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1898                 if (ap->flags & MR_AN_ENABLE) {
1899                         ap->link_time = 0;
1900                         ap->cur_time = 0;
1901                         ap->ability_match_cfg = 0;
1902                         ap->ability_match_count = 0;
1903                         ap->ability_match = 0;
1904                         ap->idle_match = 0;
1905                         ap->ack_match = 0;
1906
1907                         ap->state = ANEG_STATE_RESTART_INIT;
1908                 } else {
1909                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1910                 }
1911                 break;
1912
1913         case ANEG_STATE_RESTART_INIT:
1914                 ap->link_time = ap->cur_time;
1915                 ap->flags &= ~(MR_NP_LOADED);
1916                 ap->txconfig = 0;
1917                 tw32(MAC_TX_AUTO_NEG, 0);
1918                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1919                 tw32_f(MAC_MODE, tp->mac_mode);
1920                 udelay(40);
1921
1922                 ret = ANEG_TIMER_ENAB;
1923                 ap->state = ANEG_STATE_RESTART;
1924
1925                 /* fallthru */
1926         case ANEG_STATE_RESTART:
1927                 delta = ap->cur_time - ap->link_time;
1928                 if (delta > ANEG_STATE_SETTLE_TIME) {
1929                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1930                 } else {
1931                         ret = ANEG_TIMER_ENAB;
1932                 }
1933                 break;
1934
1935         case ANEG_STATE_DISABLE_LINK_OK:
1936                 ret = ANEG_DONE;
1937                 break;
1938
1939         case ANEG_STATE_ABILITY_DETECT_INIT:
1940                 ap->flags &= ~(MR_TOGGLE_TX);
1941                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1942                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1943                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1944                 tw32_f(MAC_MODE, tp->mac_mode);
1945                 udelay(40);
1946
1947                 ap->state = ANEG_STATE_ABILITY_DETECT;
1948                 break;
1949
1950         case ANEG_STATE_ABILITY_DETECT:
1951                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1952                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1953                 }
1954                 break;
1955
1956         case ANEG_STATE_ACK_DETECT_INIT:
1957                 ap->txconfig |= ANEG_CFG_ACK;
1958                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1959                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1960                 tw32_f(MAC_MODE, tp->mac_mode);
1961                 udelay(40);
1962
1963                 ap->state = ANEG_STATE_ACK_DETECT;
1964
1965                 /* fallthru */
1966         case ANEG_STATE_ACK_DETECT:
1967                 if (ap->ack_match != 0) {
1968                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1969                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1970                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1971                         } else {
1972                                 ap->state = ANEG_STATE_AN_ENABLE;
1973                         }
1974                 } else if (ap->ability_match != 0 &&
1975                            ap->rxconfig == 0) {
1976                         ap->state = ANEG_STATE_AN_ENABLE;
1977                 }
1978                 break;
1979
1980         case ANEG_STATE_COMPLETE_ACK_INIT:
1981                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1982                         ret = ANEG_FAILED;
1983                         break;
1984                 }
1985                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1986                                MR_LP_ADV_HALF_DUPLEX |
1987                                MR_LP_ADV_SYM_PAUSE |
1988                                MR_LP_ADV_ASYM_PAUSE |
1989                                MR_LP_ADV_REMOTE_FAULT1 |
1990                                MR_LP_ADV_REMOTE_FAULT2 |
1991                                MR_LP_ADV_NEXT_PAGE |
1992                                MR_TOGGLE_RX |
1993                                MR_NP_RX);
1994                 if (ap->rxconfig & ANEG_CFG_FD)
1995                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1996                 if (ap->rxconfig & ANEG_CFG_HD)
1997                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1998                 if (ap->rxconfig & ANEG_CFG_PS1)
1999                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2000                 if (ap->rxconfig & ANEG_CFG_PS2)
2001                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2002                 if (ap->rxconfig & ANEG_CFG_RF1)
2003                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2004                 if (ap->rxconfig & ANEG_CFG_RF2)
2005                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2006                 if (ap->rxconfig & ANEG_CFG_NP)
2007                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2008
2009                 ap->link_time = ap->cur_time;
2010
2011                 ap->flags ^= (MR_TOGGLE_TX);
2012                 if (ap->rxconfig & 0x0008)
2013                         ap->flags |= MR_TOGGLE_RX;
2014                 if (ap->rxconfig & ANEG_CFG_NP)
2015                         ap->flags |= MR_NP_RX;
2016                 ap->flags |= MR_PAGE_RX;
2017
2018                 ap->state = ANEG_STATE_COMPLETE_ACK;
2019                 ret = ANEG_TIMER_ENAB;
2020                 break;
2021
2022         case ANEG_STATE_COMPLETE_ACK:
2023                 if (ap->ability_match != 0 &&
2024                     ap->rxconfig == 0) {
2025                         ap->state = ANEG_STATE_AN_ENABLE;
2026                         break;
2027                 }
2028                 delta = ap->cur_time - ap->link_time;
2029                 if (delta > ANEG_STATE_SETTLE_TIME) {
2030                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2031                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2032                         } else {
2033                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2034                                     !(ap->flags & MR_NP_RX)) {
2035                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2036                                 } else {
2037                                         ret = ANEG_FAILED;
2038                                 }
2039                         }
2040                 }
2041                 break;
2042
2043         case ANEG_STATE_IDLE_DETECT_INIT:
2044                 ap->link_time = ap->cur_time;
2045                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2046                 tw32_f(MAC_MODE, tp->mac_mode);
2047                 udelay(40);
2048
2049                 ap->state = ANEG_STATE_IDLE_DETECT;
2050                 ret = ANEG_TIMER_ENAB;
2051                 break;
2052
2053         case ANEG_STATE_IDLE_DETECT:
2054                 if (ap->ability_match != 0 &&
2055                     ap->rxconfig == 0) {
2056                         ap->state = ANEG_STATE_AN_ENABLE;
2057                         break;
2058                 }
2059                 delta = ap->cur_time - ap->link_time;
2060                 if (delta > ANEG_STATE_SETTLE_TIME) {
2061                         /* XXX another gem from the Broadcom driver :( */
2062                         ap->state = ANEG_STATE_LINK_OK;
2063                 }
2064                 break;
2065
2066         case ANEG_STATE_LINK_OK:
2067                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2068                 ret = ANEG_DONE;
2069                 break;
2070
2071         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2072                 /* ??? unimplemented */
2073                 break;
2074
2075         case ANEG_STATE_NEXT_PAGE_WAIT:
2076                 /* ??? unimplemented */
2077                 break;
2078
2079         default:
2080                 ret = ANEG_FAILED;
2081                 break;
2082         }
2083
2084         return ret;
2085 }
2086
2087 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2088 {
2089         int res = 0;
2090         struct tg3_fiber_aneginfo aninfo;
2091         int status = ANEG_FAILED;
2092         unsigned int tick;
2093         u32 tmp;
2094
2095         tw32_f(MAC_TX_AUTO_NEG, 0);
2096
2097         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2098         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2099         udelay(40);
2100
2101         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2102         udelay(40);
2103
2104         memset(&aninfo, 0, sizeof(aninfo));
2105         aninfo.flags |= MR_AN_ENABLE;
2106         aninfo.state = ANEG_STATE_UNKNOWN;
2107         aninfo.cur_time = 0;
2108         tick = 0;
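             /* Step the autoneg state machine roughly once per microsecond,
              * for at most ~195 ms, until it reports done or failed.
              */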
2109         while (++tick < 195000) {
2110                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2111                 if (status == ANEG_DONE || status == ANEG_FAILED)
2112                         break;
2113
2114                 udelay(1);
2115         }
2116
2117         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2118         tw32_f(MAC_MODE, tp->mac_mode);
2119         udelay(40);
2120
2121         *flags = aninfo.flags;
2122
2123         if (status == ANEG_DONE &&
2124             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2125                              MR_LP_ADV_FULL_DUPLEX)))
2126                 res = 1;
2127
2128         return res;
2129 }
2130
2131 static void tg3_init_bcm8002(struct tg3 *tp)
2132 {
2133         u32 mac_status = tr32(MAC_STATUS);
2134         int i;
2135
2136         /* Reset when initializing for the first time or when we have a link. */
2137         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2138             !(mac_status & MAC_STATUS_PCS_SYNCED))
2139                 return;
2140
2141         /* Set PLL lock range. */
2142         tg3_writephy(tp, 0x16, 0x8007);
2143
2144         /* SW reset */
2145         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2146
2147         /* Wait for reset to complete. */
2148         /* XXX schedule_timeout() ... */
2149         for (i = 0; i < 500; i++)
2150                 udelay(10);
2151
2152         /* Config mode; select PMA/Ch 1 regs. */
2153         tg3_writephy(tp, 0x10, 0x8411);
2154
2155         /* Enable auto-lock and comdet, select txclk for tx. */
2156         tg3_writephy(tp, 0x11, 0x0a10);
2157
2158         tg3_writephy(tp, 0x18, 0x00a0);
2159         tg3_writephy(tp, 0x16, 0x41ff);
2160
2161         /* Assert and deassert POR. */
2162         tg3_writephy(tp, 0x13, 0x0400);
2163         udelay(40);
2164         tg3_writephy(tp, 0x13, 0x0000);
2165
2166         tg3_writephy(tp, 0x11, 0x0a50);
2167         udelay(40);
2168         tg3_writephy(tp, 0x11, 0x0a10);
2169
2170         /* Wait for signal to stabilize */
2171         /* XXX schedule_timeout() ... */
2172         for (i = 0; i < 15000; i++)
2173                 udelay(10);
2174
2175         /* Deselect the channel register so we can read the PHYID
2176          * later.
2177          */
2178         tg3_writephy(tp, 0x10, 0x8011);
2179 }
2180
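     /* Bring up a fiber link using the hardware SG-DIG auto-negotiation
      * block: program SG_DIG_CTRL for the requested mode, wait for the
      * negotiation (or parallel detection) to resolve, and return nonzero
      * if the link came up.
      */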
2181 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2182 {
2183         u32 sg_dig_ctrl, sg_dig_status;
2184         u32 serdes_cfg, expected_sg_dig_ctrl;
2185         int workaround, port_a;
2186         int current_link_up;
2187
2188         serdes_cfg = 0;
2189         expected_sg_dig_ctrl = 0;
2190         workaround = 0;
2191         port_a = 1;
2192         current_link_up = 0;
2193
2194         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2195             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2196                 workaround = 1;
2197                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2198                         port_a = 0;
2199
2200                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2201                 /* preserve bits 20-23 for voltage regulator */
2202                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2203         }
2204
2205         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2206
2207         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2208                 if (sg_dig_ctrl & (1 << 31)) {
2209                         if (workaround) {
2210                                 u32 val = serdes_cfg;
2211
2212                                 if (port_a)
2213                                         val |= 0xc010000;
2214                                 else
2215                                         val |= 0x4010000;
2216                                 tw32_f(MAC_SERDES_CFG, val);
2217                         }
2218                         tw32_f(SG_DIG_CTRL, 0x01388400);
2219                 }
2220                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2221                         tg3_setup_flow_control(tp, 0, 0);
2222                         current_link_up = 1;
2223                 }
2224                 goto out;
2225         }
2226
2227         /* Want auto-negotiation.  */
2228         expected_sg_dig_ctrl = 0x81388400;
2229
2230         /* Pause capability */
2231         expected_sg_dig_ctrl |= (1 << 11);
2232
2233         /* Asymmetric pause */
2234         expected_sg_dig_ctrl |= (1 << 12);
2235
2236         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2237                 if (workaround)
2238                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2239                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2240                 udelay(5);
2241                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2242
2243                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2244         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2245                                  MAC_STATUS_SIGNAL_DET)) {
2246                 int i;
2247
2248                 /* Give time to negotiate (~200ms) */
2249                 for (i = 0; i < 40000; i++) {
2250                         sg_dig_status = tr32(SG_DIG_STATUS);
2251                         if (sg_dig_status & (0x3))
2252                                 break;
2253                         udelay(5);
2254                 }
2255                 mac_status = tr32(MAC_STATUS);
2256
2257                 if ((sg_dig_status & (1 << 1)) &&
2258                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2259                         u32 local_adv, remote_adv;
2260
2261                         local_adv = ADVERTISE_PAUSE_CAP;
2262                         remote_adv = 0;
2263                         if (sg_dig_status & (1 << 19))
2264                                 remote_adv |= LPA_PAUSE_CAP;
2265                         if (sg_dig_status & (1 << 20))
2266                                 remote_adv |= LPA_PAUSE_ASYM;
2267
2268                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2269                         current_link_up = 1;
2270                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2271                 } else if (!(sg_dig_status & (1 << 1))) {
2272                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2273                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2274                         else {
2275                                 if (workaround) {
2276                                         u32 val = serdes_cfg;
2277
2278                                         if (port_a)
2279                                                 val |= 0xc010000;
2280                                         else
2281                                                 val |= 0x4010000;
2282
2283                                         tw32_f(MAC_SERDES_CFG, val);
2284                                 }
2285
2286                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2287                                 udelay(40);
2288
2289                                 /* Link parallel detection - link is up
2290                                  * only if we have PCS_SYNC and are not
2291                                  * receiving config code words. */
2292                                 mac_status = tr32(MAC_STATUS);
2293                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2294                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2295                                         tg3_setup_flow_control(tp, 0, 0);
2296                                         current_link_up = 1;
2297                                 }
2298                         }
2299                 }
2300         }
2301
2302 out:
2303         return current_link_up;
2304 }
2305
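     /* Fiber link setup without the hardware autoneg block: when autoneg is
      * enabled, run the software state machine via fiber_autoneg(); otherwise
      * force a 1000 Mb/s full-duplex link.
      */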
2306 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2307 {
2308         int current_link_up = 0;
2309
2310         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2311                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2312                 goto out;
2313         }
2314
2315         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2316                 u32 flags;
2317                 int i;
2318   
2319                 if (fiber_autoneg(tp, &flags)) {
2320                         u32 local_adv, remote_adv;
2321
2322                         local_adv = ADVERTISE_PAUSE_CAP;
2323                         remote_adv = 0;
2324                         if (flags & MR_LP_ADV_SYM_PAUSE)
2325                                 remote_adv |= LPA_PAUSE_CAP;
2326                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2327                                 remote_adv |= LPA_PAUSE_ASYM;
2328
2329                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2330
2331                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2332                         current_link_up = 1;
2333                 }
2334                 for (i = 0; i < 30; i++) {
2335                         udelay(20);
2336                         tw32_f(MAC_STATUS,
2337                                (MAC_STATUS_SYNC_CHANGED |
2338                                 MAC_STATUS_CFG_CHANGED));
2339                         udelay(40);
2340                         if ((tr32(MAC_STATUS) &
2341                              (MAC_STATUS_SYNC_CHANGED |
2342                               MAC_STATUS_CFG_CHANGED)) == 0)
2343                                 break;
2344                 }
2345
2346                 mac_status = tr32(MAC_STATUS);
2347                 if (current_link_up == 0 &&
2348                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2349                     !(mac_status & MAC_STATUS_RCVD_CFG))
2350                         current_link_up = 1;
2351         } else {
2352                 /* Forcing 1000FD link up. */
2353                 current_link_up = 1;
2354                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2355
2356                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2357                 udelay(40);
2358         }
2359
2360 out:
2361         return current_link_up;
2362 }
2363
2364 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2365 {
2366         u32 orig_pause_cfg;
2367         u16 orig_active_speed;
2368         u8 orig_active_duplex;
2369         u32 mac_status;
2370         int current_link_up;
2371         int i;
2372
2373         orig_pause_cfg =
2374                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2375                                   TG3_FLAG_TX_PAUSE));
2376         orig_active_speed = tp->link_config.active_speed;
2377         orig_active_duplex = tp->link_config.active_duplex;
2378
2379         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2380             netif_carrier_ok(tp->dev) &&
2381             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2382                 mac_status = tr32(MAC_STATUS);
2383                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2384                                MAC_STATUS_SIGNAL_DET |
2385                                MAC_STATUS_CFG_CHANGED |
2386                                MAC_STATUS_RCVD_CFG);
2387                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2388                                    MAC_STATUS_SIGNAL_DET)) {
2389                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2390                                             MAC_STATUS_CFG_CHANGED));
2391                         return 0;
2392                 }
2393         }
2394
2395         tw32_f(MAC_TX_AUTO_NEG, 0);
2396
2397         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2398         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2399         tw32_f(MAC_MODE, tp->mac_mode);
2400         udelay(40);
2401
2402         if (tp->phy_id == PHY_ID_BCM8002)
2403                 tg3_init_bcm8002(tp);
2404
2405         /* Enable link change event even when serdes polling.  */
2406         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2407         udelay(40);
2408
2409         current_link_up = 0;
2410         mac_status = tr32(MAC_STATUS);
2411
2412         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2413                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2414         else
2415                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2416
2417         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2418         tw32_f(MAC_MODE, tp->mac_mode);
2419         udelay(40);
2420
2421         tp->hw_status->status =
2422                 (SD_STATUS_UPDATED |
2423                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2424
2425         for (i = 0; i < 100; i++) {
2426                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2427                                     MAC_STATUS_CFG_CHANGED));
2428                 udelay(5);
2429                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2430                                          MAC_STATUS_CFG_CHANGED)) == 0)
2431                         break;
2432         }
2433
2434         mac_status = tr32(MAC_STATUS);
2435         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2436                 current_link_up = 0;
2437                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2438                         tw32_f(MAC_MODE, (tp->mac_mode |
2439                                           MAC_MODE_SEND_CONFIGS));
2440                         udelay(1);
2441                         tw32_f(MAC_MODE, tp->mac_mode);
2442                 }
2443         }
2444
2445         if (current_link_up == 1) {
2446                 tp->link_config.active_speed = SPEED_1000;
2447                 tp->link_config.active_duplex = DUPLEX_FULL;
2448                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2449                                     LED_CTRL_LNKLED_OVERRIDE |
2450                                     LED_CTRL_1000MBPS_ON));
2451         } else {
2452                 tp->link_config.active_speed = SPEED_INVALID;
2453                 tp->link_config.active_duplex = DUPLEX_INVALID;
2454                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2455                                     LED_CTRL_LNKLED_OVERRIDE |
2456                                     LED_CTRL_TRAFFIC_OVERRIDE));
2457         }
2458
2459         if (current_link_up != netif_carrier_ok(tp->dev)) {
2460                 if (current_link_up)
2461                         netif_carrier_on(tp->dev);
2462                 else
2463                         netif_carrier_off(tp->dev);
2464                 tg3_link_report(tp);
2465         } else {
2466                 u32 now_pause_cfg =
2467                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2468                                          TG3_FLAG_TX_PAUSE);
2469                 if (orig_pause_cfg != now_pause_cfg ||
2470                     orig_active_speed != tp->link_config.active_speed ||
2471                     orig_active_duplex != tp->link_config.active_duplex)
2472                         tg3_link_report(tp);
2473         }
2474
2475         return 0;
2476 }
2477
2478 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2479 {
2480         int err;
2481
2482         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2483                 err = tg3_setup_fiber_phy(tp, force_reset);
2484         } else {
2485                 err = tg3_setup_copper_phy(tp, force_reset);
2486         }
2487
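             /* A larger slot time is used for half-duplex gigabit links,
              * presumably to cover 1000 Mb/s half-duplex carrier extension;
              * all other modes use the default of 32.
              */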
2488         if (tp->link_config.active_speed == SPEED_1000 &&
2489             tp->link_config.active_duplex == DUPLEX_HALF)
2490                 tw32(MAC_TX_LENGTHS,
2491                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2492                       (6 << TX_LENGTHS_IPG_SHIFT) |
2493                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2494         else
2495                 tw32(MAC_TX_LENGTHS,
2496                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2497                       (6 << TX_LENGTHS_IPG_SHIFT) |
2498                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2499
2500         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2501                 if (netif_carrier_ok(tp->dev)) {
2502                         tw32(HOSTCC_STAT_COAL_TICKS,
2503                              DEFAULT_STAT_COAL_TICKS);
2504                 } else {
2505                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2506                 }
2507         }
2508
2509         return err;
2510 }
2511
2512 /* Tigon3 never reports partial packet sends.  So we do not
2513  * need special logic to handle SKBs that have not had all
2514  * of their frags sent yet, like SunGEM does.
2515  */
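     /* Reclaim completed transmits: walk the TX ring from the driver's
      * consumer index to the hardware's consumer index, unmapping the head
      * and every fragment of each completed skb before freeing it, then wake
      * the queue if enough descriptors have been released.
      */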
2516 static void tg3_tx(struct tg3 *tp)
2517 {
2518         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2519         u32 sw_idx = tp->tx_cons;
2520
2521         while (sw_idx != hw_idx) {
2522                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2523                 struct sk_buff *skb = ri->skb;
2524                 int i;
2525
2526                 if (unlikely(skb == NULL))
2527                         BUG();
2528
2529                 pci_unmap_single(tp->pdev,
2530                                  pci_unmap_addr(ri, mapping),
2531                                  skb_headlen(skb),
2532                                  PCI_DMA_TODEVICE);
2533
2534                 ri->skb = NULL;
2535
2536                 sw_idx = NEXT_TX(sw_idx);
2537
2538                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2539                         if (unlikely(sw_idx == hw_idx))
2540                                 BUG();
2541
2542                         ri = &tp->tx_buffers[sw_idx];
2543                         if (unlikely(ri->skb != NULL))
2544                                 BUG();
2545
2546                         pci_unmap_page(tp->pdev,
2547                                        pci_unmap_addr(ri, mapping),
2548                                        skb_shinfo(skb)->frags[i].size,
2549                                        PCI_DMA_TODEVICE);
2550
2551                         sw_idx = NEXT_TX(sw_idx);
2552                 }
2553
2554                 dev_kfree_skb_irq(skb);
2555         }
2556
2557         tp->tx_cons = sw_idx;
2558
2559         if (netif_queue_stopped(tp->dev) &&
2560             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2561                 netif_wake_queue(tp->dev);
2562 }
2563
2564 /* Returns size of skb allocated or < 0 on error.
2565  *
2566  * We only need to fill in the address because the other members
2567  * of the RX descriptor are invariant, see tg3_init_rings.
2568  *
2569  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2570  * posting buffers we only dirty the first cache line of the RX
2571  * descriptor (containing the address).  Whereas for the RX status
2572  * buffers the cpu only reads the last cacheline of the RX descriptor
2573  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2574  */
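     /* Note: a non-negative src_idx names a source ring entry whose skb
      * pointer is cleared once the new buffer has been committed.
      */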
2575 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2576                             int src_idx, u32 dest_idx_unmasked)
2577 {
2578         struct tg3_rx_buffer_desc *desc;
2579         struct ring_info *map, *src_map;
2580         struct sk_buff *skb;
2581         dma_addr_t mapping;
2582         int skb_size, dest_idx;
2583
2584         src_map = NULL;
2585         switch (opaque_key) {
2586         case RXD_OPAQUE_RING_STD:
2587                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2588                 desc = &tp->rx_std[dest_idx];
2589                 map = &tp->rx_std_buffers[dest_idx];
2590                 if (src_idx >= 0)
2591                         src_map = &tp->rx_std_buffers[src_idx];
2592                 skb_size = RX_PKT_BUF_SZ;
2593                 break;
2594
2595         case RXD_OPAQUE_RING_JUMBO:
2596                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2597                 desc = &tp->rx_jumbo[dest_idx];
2598                 map = &tp->rx_jumbo_buffers[dest_idx];
2599                 if (src_idx >= 0)
2600                         src_map = &tp->rx_jumbo_buffers[src_idx];
2601                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2602                 break;
2603
2604         default:
2605                 return -EINVAL;
2606         }
2607
2608         /* Do not overwrite any of the map or rp information
2609          * until we are sure we can commit to a new buffer.
2610          *
2611          * Callers depend upon this behavior and assume that
2612          * we leave everything unchanged if we fail.
2613          */
2614         skb = dev_alloc_skb(skb_size);
2615         if (skb == NULL)
2616                 return -ENOMEM;
2617
2618         skb->dev = tp->dev;
2619         skb_reserve(skb, tp->rx_offset);
2620
2621         mapping = pci_map_single(tp->pdev, skb->data,
2622                                  skb_size - tp->rx_offset,
2623                                  PCI_DMA_FROMDEVICE);
2624
2625         map->skb = skb;
2626         pci_unmap_addr_set(map, mapping, mapping);
2627
2628         if (src_map != NULL)
2629                 src_map->skb = NULL;
2630
2631         desc->addr_hi = ((u64)mapping >> 32);
2632         desc->addr_lo = ((u64)mapping & 0xffffffff);
2633
2634         return skb_size;
2635 }
2636
2637 /* We only need to move the address over because the other
2638  * members of the RX descriptor are invariant.  See notes above
2639  * tg3_alloc_rx_skb for full details.
2640  */
2641 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2642                            int src_idx, u32 dest_idx_unmasked)
2643 {
2644         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2645         struct ring_info *src_map, *dest_map;
2646         int dest_idx;
2647
2648         switch (opaque_key) {
2649         case RXD_OPAQUE_RING_STD:
2650                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2651                 dest_desc = &tp->rx_std[dest_idx];
2652                 dest_map = &tp->rx_std_buffers[dest_idx];
2653                 src_desc = &tp->rx_std[src_idx];
2654                 src_map = &tp->rx_std_buffers[src_idx];
2655                 break;
2656
2657         case RXD_OPAQUE_RING_JUMBO:
2658                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2659                 dest_desc = &tp->rx_jumbo[dest_idx];
2660                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2661                 src_desc = &tp->rx_jumbo[src_idx];
2662                 src_map = &tp->rx_jumbo_buffers[src_idx];
2663                 break;
2664
2665         default:
2666                 return;
2667         }
2668
2669         dest_map->skb = src_map->skb;
2670         pci_unmap_addr_set(dest_map, mapping,
2671                            pci_unmap_addr(src_map, mapping));
2672         dest_desc->addr_hi = src_desc->addr_hi;
2673         dest_desc->addr_lo = src_desc->addr_lo;
2674
2675         src_map->skb = NULL;
2676 }
2677
2678 #if TG3_VLAN_TAG_USED
2679 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2680 {
2681         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2682 }
2683 #endif
2684
2685 /* The RX ring scheme is composed of multiple rings which post fresh
2686  * buffers to the chip, and one special ring the chip uses to report
2687  * status back to the host.
2688  *
2689  * The special ring reports the status of received packets to the
2690  * host.  The chip does not write into the original descriptor the
2691  * RX buffer was obtained from.  The chip simply takes the original
2692  * descriptor as provided by the host, updates the status and length
2693  * field, then writes this into the next status ring entry.
2694  *
2695  * Each ring the host uses to post buffers to the chip is described
2696  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2697  * it is first placed into the on-chip ram.  When the packet's length
2698  * is known, it walks down the TG3_BDINFO entries to select the ring.
2699  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2700  * which is within the range of the new packet's length is chosen.
2701  *
2702  * The "separate ring for rx status" scheme may sound queer, but it makes
2703  * sense from a cache coherency perspective.  If only the host writes
2704  * to the buffer post rings, and only the chip writes to the rx status
2705  * rings, then cache lines never move beyond shared-modified state.
2706  * If both the host and chip were to write into the same ring, cache line
2707  * eviction could occur since both entities want it in an exclusive state.
2708  */
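     /* tg3_rx() drains the status ring accordingly: each completed packet is
      * either handed to the stack with a replacement buffer posted in its
      * place, or (for small packets) copied into a fresh skb so the original
      * ring buffer can be recycled; the mailbox indices are then advanced so
      * the chip can reuse the entries.
      */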
2709 static int tg3_rx(struct tg3 *tp, int budget)
2710 {
2711         u32 work_mask;
2712         u32 sw_idx = tp->rx_rcb_ptr;
2713         u16 hw_idx;
2714         int received;
2715
2716         hw_idx = tp->hw_status->idx[0].rx_producer;
2717         /*
2718          * We need to order the read of hw_idx and the read of
2719          * the opaque cookie.
2720          */
2721         rmb();
2722         work_mask = 0;
2723         received = 0;
2724         while (sw_idx != hw_idx && budget > 0) {
2725                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2726                 unsigned int len;
2727                 struct sk_buff *skb;
2728                 dma_addr_t dma_addr;
2729                 u32 opaque_key, desc_idx, *post_ptr;
2730
2731                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2732                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2733                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2734                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2735                                                   mapping);
2736                         skb = tp->rx_std_buffers[desc_idx].skb;
2737                         post_ptr = &tp->rx_std_ptr;
2738                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2739                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2740                                                   mapping);
2741                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2742                         post_ptr = &tp->rx_jumbo_ptr;
2743                 }
2744                 else {
2745                         goto next_pkt_nopost;
2746                 }
2747
2748                 work_mask |= opaque_key;
2749
2750                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2751                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2752                 drop_it:
2753                         tg3_recycle_rx(tp, opaque_key,
2754                                        desc_idx, *post_ptr);
2755                 drop_it_no_recycle:
2756                         /* Other statistics are kept track of by the card. */
2757                         tp->net_stats.rx_dropped++;
2758                         goto next_pkt;
2759                 }
2760
2761                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2762
2763                 /* rx_offset != 2 iff this is a 5701 card running
2764                  * in PCI-X mode [see tg3_get_invariants()]
2765                  */
2766                 if (len > RX_COPY_THRESHOLD &&
2767                     tp->rx_offset == 2) {
2768                         int skb_size;
2769
2770                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2771                                                     desc_idx, *post_ptr);
2772                         if (skb_size < 0)
2773                                 goto drop_it;
2774
2775                         pci_unmap_single(tp->pdev, dma_addr,
2776                                          skb_size - tp->rx_offset,
2777                                          PCI_DMA_FROMDEVICE);
2778
2779                         skb_put(skb, len);
2780                 } else {
2781                         struct sk_buff *copy_skb;
2782
2783                         tg3_recycle_rx(tp, opaque_key,
2784                                        desc_idx, *post_ptr);
2785
2786                         copy_skb = dev_alloc_skb(len + 2);
2787                         if (copy_skb == NULL)
2788                                 goto drop_it_no_recycle;
2789
2790                         copy_skb->dev = tp->dev;
2791                         skb_reserve(copy_skb, 2);
2792                         skb_put(copy_skb, len);
2793                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2794                         memcpy(copy_skb->data, skb->data, len);
2795                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2796
2797                         /* We'll reuse the original ring buffer. */
2798                         skb = copy_skb;
2799                 }
2800
2801                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2802                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2803                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2804                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2805                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2806                 else
2807                         skb->ip_summed = CHECKSUM_NONE;
2808
2809                 skb->protocol = eth_type_trans(skb, tp->dev);
2810 #if TG3_VLAN_TAG_USED
2811                 if (tp->vlgrp != NULL &&
2812                     desc->type_flags & RXD_FLAG_VLAN) {
2813                         tg3_vlan_rx(tp, skb,
2814                                     desc->err_vlan & RXD_VLAN_MASK);
2815                 } else
2816 #endif
2817                         netif_receive_skb(skb);
2818
2819                 tp->dev->last_rx = jiffies;
2820                 received++;
2821                 budget--;
2822
2823 next_pkt:
2824                 (*post_ptr)++;
2825 next_pkt_nopost:
2826                 sw_idx++;
2827                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
2828
2829                 /* Refresh hw_idx to see if there is new work */
2830                 if (sw_idx == hw_idx) {
2831                         hw_idx = tp->hw_status->idx[0].rx_producer;
2832                         rmb();
2833                 }
2834         }
2835
2836         /* ACK the status ring. */
2837         tp->rx_rcb_ptr = sw_idx;
2838         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
2839
2840         /* Refill RX ring(s). */
2841         if (work_mask & RXD_OPAQUE_RING_STD) {
2842                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2843                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2844                              sw_idx);
2845         }
2846         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2847                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2848                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2849                              sw_idx);
2850         }
2851         mmiowb();
2852
2853         return received;
2854 }
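
/* Note on the refill above: work_mask records which posting rings (standard
 * and/or jumbo) actually had buffers consumed, so only those producer
 * mailboxes are rewritten.  tp->rx_std_ptr and tp->rx_jumbo_ptr were already
 * advanced through *post_ptr as each packet was recycled or replaced.
 */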
2855
2856 static int tg3_poll(struct net_device *netdev, int *budget)
2857 {
2858         struct tg3 *tp = netdev_priv(netdev);
2859         struct tg3_hw_status *sblk = tp->hw_status;
2860         unsigned long flags;
2861         int done;
2862
2863         spin_lock_irqsave(&tp->lock, flags);
2864
2865         /* handle link change and other phy events */
2866         if (!(tp->tg3_flags &
2867               (TG3_FLAG_USE_LINKCHG_REG |
2868                TG3_FLAG_POLL_SERDES))) {
2869                 if (sblk->status & SD_STATUS_LINK_CHG) {
2870                         sblk->status = SD_STATUS_UPDATED |
2871                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2872                         tg3_setup_phy(tp, 0);
2873                 }
2874         }
2875
2876         /* run TX completion thread */
2877         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2878                 spin_lock(&tp->tx_lock);
2879                 tg3_tx(tp);
2880                 spin_unlock(&tp->tx_lock);
2881         }
2882
2883         spin_unlock_irqrestore(&tp->lock, flags);
2884
2885         /* run RX thread, within the bounds set by NAPI.
2886          * All RX "locking" is done by ensuring outside
2887          * code synchronizes with dev->poll()
2888          */
2889         done = 1;
2890         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2891                 int orig_budget = *budget;
2892                 int work_done;
2893
2894                 if (orig_budget > netdev->quota)
2895                         orig_budget = netdev->quota;
2896
2897                 work_done = tg3_rx(tp, orig_budget);
2898
2899                 *budget -= work_done;
2900                 netdev->quota -= work_done;
2901
2902                 if (work_done >= orig_budget)
2903                         done = 0;
2904         }
2905
2906         /* if no more work, tell net stack and NIC we're done */
2907         if (done) {
2908                 spin_lock_irqsave(&tp->lock, flags);
2909                 __netif_rx_complete(netdev);
2910                 tg3_restart_ints(tp);
2911                 spin_unlock_irqrestore(&tp->lock, flags);
2912         }
2913
2914         return (done ? 0 : 1);
2915 }
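
/* Reminder of the NAPI contract assumed here: dev->poll() returns 0 once all
 * work fit within the budget (after __netif_rx_complete() and re-enabling
 * chip interrupts via tg3_restart_ints()), and returns nonzero when the
 * budget was exhausted so the core keeps polling.
 */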
2916
2917 /* MSI ISR - No need to check for interrupt sharing and no need to
2918  * flush status block and interrupt mailbox. PCI ordering rules
2919  * guarantee that MSI will arrive after the status block.
2920  */
2921 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2922 {
2923         struct net_device *dev = dev_id;
2924         struct tg3 *tp = netdev_priv(dev);
2925         struct tg3_hw_status *sblk = tp->hw_status;
2926         unsigned long flags;
2927
2928         spin_lock_irqsave(&tp->lock, flags);
2929
2930         /*
2931          * writing any value to intr-mbox-0 clears PCI INTA# and
2932          * chip-internal interrupt pending events.
2933          * writing non-zero to intr-mbox-0 additionally tells the
2934          * NIC to stop sending us irqs, engaging "in-intr-handler"
2935          * event coalescing.
2936          */
2937         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
2938         sblk->status &= ~SD_STATUS_UPDATED;
2939
2940         if (likely(tg3_has_work(tp)))
2941                 netif_rx_schedule(dev);         /* schedule NAPI poll */
2942         else {
2943                 /* no work, re-enable interrupts
2944                  */
2945                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2946                              0x00000000);
2947         }
2948
2949         spin_unlock_irqrestore(&tp->lock, flags);
2950
2951         return IRQ_RETVAL(1);
2952 }
2953
2954 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2955 {
2956         struct net_device *dev = dev_id;
2957         struct tg3 *tp = netdev_priv(dev);
2958         struct tg3_hw_status *sblk = tp->hw_status;
2959         unsigned long flags;
2960         unsigned int handled = 1;
2961
2962         spin_lock_irqsave(&tp->lock, flags);
2963
2964         /* In INTx mode, the interrupt can arrive at the CPU before the
2965          * status block write posted prior to it has reached host memory.
2966          * Reading the PCI State register will confirm whether the
2967          * interrupt is ours and will flush the status block.
2968          */
2969         if ((sblk->status & SD_STATUS_UPDATED) ||
2970             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2971                 /*
2972                  * writing any value to intr-mbox-0 clears PCI INTA# and
2973                  * chip-internal interrupt pending events.
2974                  * writing non-zero to intr-mbox-0 additionally tells the
2975                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2976                  * event coalescing.
2977                  */
2978                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2979                              0x00000001);
2980                 /*
2981                  * Flush PCI write.  This also guarantees that our
2982                  * status block has been flushed to host memory.
2983                  */
2984                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2985                 sblk->status &= ~SD_STATUS_UPDATED;
2986
2987                 if (likely(tg3_has_work(tp)))
2988                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2989                 else {
2990                         /* no work, shared interrupt perhaps?  re-enable
2991                          * interrupts, and flush that PCI write
2992                          */
2993                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2994                                 0x00000000);
2995                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2996                 }
2997         } else {        /* shared interrupt */
2998                 handled = 0;
2999         }
3000
3001         spin_unlock_irqrestore(&tp->lock, flags);
3002
3003         return IRQ_RETVAL(handled);
3004 }
3005
3006 /* ISR for interrupt test */
3007 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3008                 struct pt_regs *regs)
3009 {
3010         struct net_device *dev = dev_id;
3011         struct tg3 *tp = netdev_priv(dev);
3012         struct tg3_hw_status *sblk = tp->hw_status;
3013
3014         if (sblk->status & SD_STATUS_UPDATED) {
3015                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3016                              0x00000001);
3017                 return IRQ_RETVAL(1);
3018         }
3019         return IRQ_RETVAL(0);
3020 }
3021
3022 static int tg3_init_hw(struct tg3 *);
3023 static int tg3_halt(struct tg3 *);
3024
3025 #ifdef CONFIG_NET_POLL_CONTROLLER
3026 static void tg3_poll_controller(struct net_device *dev)
3027 {
3028         struct tg3 *tp = netdev_priv(dev);
3029
3030         tg3_interrupt(tp->pdev->irq, dev, NULL);
3031 }
3032 #endif
3033
3034 static void tg3_reset_task(void *_data)
3035 {
3036         struct tg3 *tp = _data;
3037         unsigned int restart_timer;
3038
3039         tg3_netif_stop(tp);
3040
3041         spin_lock_irq(&tp->lock);
3042         spin_lock(&tp->tx_lock);
3043
3044         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3045         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3046
3047         tg3_halt(tp);
3048         tg3_init_hw(tp);
3049
3050         tg3_netif_start(tp);
3051
3052         spin_unlock(&tp->tx_lock);
3053         spin_unlock_irq(&tp->lock);
3054
3055         if (restart_timer)
3056                 mod_timer(&tp->timer, jiffies + 1);
3057 }
3058
3059 static void tg3_tx_timeout(struct net_device *dev)
3060 {
3061         struct tg3 *tp = netdev_priv(dev);
3062
3063         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3064                dev->name);
3065
3066         schedule_work(&tp->reset_task);
3067 }
3068
3069 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3070
3071 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3072                                        u32 guilty_entry, int guilty_len,
3073                                        u32 last_plus_one, u32 *start, u32 mss)
3074 {
3075         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3076         dma_addr_t new_addr;
3077         u32 entry = *start;
3078         int i;
3079
3080         if (!new_skb) {
3081                 dev_kfree_skb(skb);
3082                 return -1;
3083         }
3084
3085         /* New SKB is guaranteed to be linear. */
3086         entry = *start;
3087         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3088                                   PCI_DMA_TODEVICE);
3089         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3090                     (skb->ip_summed == CHECKSUM_HW) ?
3091                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3092         *start = NEXT_TX(entry);
3093
3094         /* Now clean up the sw ring entries. */
3095         i = 0;
3096         while (entry != last_plus_one) {
3097                 int len;
3098
3099                 if (i == 0)
3100                         len = skb_headlen(skb);
3101                 else
3102                         len = skb_shinfo(skb)->frags[i-1].size;
3103                 pci_unmap_single(tp->pdev,
3104                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3105                                  len, PCI_DMA_TODEVICE);
3106                 if (i == 0) {
3107                         tp->tx_buffers[entry].skb = new_skb;
3108                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3109                 } else {
3110                         tp->tx_buffers[entry].skb = NULL;
3111                 }
3112                 entry = NEXT_TX(entry);
3113                 i++;
3114         }
3115
3116         dev_kfree_skb(skb);
3117
3118         return 0;
3119 }
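
/* In short, the workaround above linearizes the offending skb into a fresh
 * copy, points a single TX descriptor at the new mapping, then unmaps and
 * clears the sw ring entries that had been set up for the original head and
 * fragments.  If the copy fails, the packet is dropped.
 */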
3120
3121 static void tg3_set_txd(struct tg3 *tp, int entry,
3122                         dma_addr_t mapping, int len, u32 flags,
3123                         u32 mss_and_is_end)
3124 {
3125         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3126         int is_end = (mss_and_is_end & 0x1);
3127         u32 mss = (mss_and_is_end >> 1);
3128         u32 vlan_tag = 0;
3129
3130         if (is_end)
3131                 flags |= TXD_FLAG_END;
3132         if (flags & TXD_FLAG_VLAN) {
3133                 vlan_tag = flags >> 16;
3134                 flags &= 0xffff;
3135         }
3136         vlan_tag |= (mss << TXD_MSS_SHIFT);
3137
3138         txd->addr_hi = ((u64) mapping >> 32);
3139         txd->addr_lo = ((u64) mapping & 0xffffffff);
3140         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3141         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3142 }
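
/* The mss_and_is_end argument packs two things: bit 0 marks the final
 * descriptor of an skb (it becomes TXD_FLAG_END), and the remaining bits
 * carry the TSO MSS, i.e. callers pass something like (is_last | (mss << 1)).
 */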
3143
3144 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3145 {
3146         u32 base = (u32) mapping & 0xffffffff;
3147
3148         return ((base > 0xffffdcc0) &&
3149                 (base + len + 8 < base));
3150 }
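
/* The test above flags DMA mappings whose low 32 bits would wrap across a
 * 4 GB boundary, the case tigon3_4gb_hwbug_workaround() exists for.  For
 * example, base = 0xfffff000 with len = 0x1800 gives base + len + 8 = 0x808
 * after the 32-bit wrap, which is < base, so the buffer straddles the
 * boundary and must be reworked.
 */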
3151
3152 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3153 {
3154         struct tg3 *tp = netdev_priv(dev);
3155         dma_addr_t mapping;
3156         unsigned int i;
3157         u32 len, entry, base_flags, mss;
3158         int would_hit_hwbug;
3159         unsigned long flags;
3160
3161         len = skb_headlen(skb);
3162
3163         /* No BH disabling for tx_lock here.  We are running in BH disabled
3164          * context and TX reclaim runs via tp->poll inside of a software
3165          * interrupt.  Rejoice!
3166          *
3167          * Actually, things are not so simple.  If we are to take a hw
3168          * IRQ here, we can deadlock, consider:
3169          *
3170          *       CPU1           CPU2
3171          *   tg3_start_xmit
3172          *   take tp->tx_lock
3173          *                      tg3_timer
3174          *                      take tp->lock
3175          *   tg3_interrupt
3176          *   spin on tp->lock
3177          *                      spin on tp->tx_lock
3178          *
3179          * So we really do need to disable interrupts when taking
3180          * tx_lock here.
3181          */
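        /* Hence the local_irq_save() + spin_trylock() below: if the lock is
         * already held we return NETDEV_TX_LOCKED, asking the core to requeue
         * the packet and retry rather than spinning with IRQs disabled.
         */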
3182         local_irq_save(flags);
3183         if (!spin_trylock(&tp->tx_lock)) { 
3184                 local_irq_restore(flags);
3185                 return NETDEV_TX_LOCKED; 
3186         } 
3187
3188         /* This is a hard error, log it. */
3189         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3190                 netif_stop_queue(dev);
3191                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3192                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3193                        dev->name);
3194                 return NETDEV_TX_BUSY;
3195         }
3196
3197         entry = tp->tx_prod;
3198         base_flags = 0;
3199         if (skb->ip_summed == CHECKSUM_HW)
3200                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3201 #if TG3_TSO_SUPPORT != 0
3202         mss = 0;
3203         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3204             (mss = skb_shinfo(skb)->tso_size) != 0) {
3205                 int tcp_opt_len, ip_tcp_len;
3206
3207                 if (skb_header_cloned(skb) &&
3208                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3209                         dev_kfree_skb(skb);
3210                         goto out_unlock;
3211                 }
3212
3213                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3214                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3215
3216                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3217                                TXD_FLAG_CPU_POST_DMA);
3218
3219                 skb->nh.iph->check = 0;
3220                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3221                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3222                         skb->h.th->check = 0;
3223                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3224                 }
3225                 else {
3226                         skb->h.th->check =
3227                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3228                                                    skb->nh.iph->daddr,
3229                                                    0, IPPROTO_TCP, 0);
3230                 }
3231
3232                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3233                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3234                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3235                                 int tsflags;
3236
3237                                 tsflags = ((skb->nh.iph->ihl - 5) +
3238                                            (tcp_opt_len >> 2));
3239                                 mss |= (tsflags << 11);
3240                         }
3241                 } else {
3242                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3243                                 int tsflags;
3244
3245                                 tsflags = ((skb->nh.iph->ihl - 5) +
3246                                            (tcp_opt_len >> 2));
3247                                 base_flags |= tsflags << 12;
3248                         }
3249                 }
3250         }
3251 #else
3252         mss = 0;
3253 #endif
3254 #if TG3_VLAN_TAG_USED
3255         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3256                 base_flags |= (TXD_FLAG_VLAN |
3257                                (vlan_tx_tag_get(skb) << 16));
3258 #endif
3259
3260         /* Queue skb data, a.k.a. the main skb fragment. */
3261         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3262
3263         tp->tx_buffers[entry].skb = skb;
3264         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3265
3266         would_hit_hwbug = 0;
3267
3268         if (tg3_4g_overflow_test(mapping, len))
3269                 would_hit_hwbug = entry + 1;
3270
3271         tg3_set_txd(tp, entry, mapping, len, base_flags,
3272                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3273
3274         entry = NEXT_TX(entry);
3275
3276         /* Now loop through additional data fragments, and queue them. */
3277         if (skb_shinfo(skb)->nr_frags > 0) {
3278                 unsigned int i, last;
3279
3280                 last = skb_shinfo(skb)->nr_frags - 1;
3281                 for (i = 0; i <= last; i++) {
3282                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3283
3284                         len = frag->size;
3285                         mapping = pci_map_page(tp->pdev,
3286                                                frag->page,
3287                                                frag->page_offset,
3288                                                len, PCI_DMA_TODEVICE);
3289
3290                         tp->tx_buffers[entry].skb = NULL;
3291                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3292
3293                         if (tg3_4g_overflow_test(mapping, len)) {
3294                                 /* Only one should match. */
3295                                 if (would_hit_hwbug)
3296                                         BUG();
3297                                 would_hit_hwbug = entry + 1;
3298                         }
3299
3300                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3301                                 tg3_set_txd(tp, entry, mapping, len,
3302                                             base_flags, (i == last)|(mss << 1));
3303                         else
3304                                 tg3_set_txd(tp, entry, mapping, len,
3305                                             base_flags, (i == last));
3306
3307                         entry = NEXT_TX(entry);
3308                 }
3309         }
3310
3311         if (would_hit_hwbug) {
3312                 u32 last_plus_one = entry;
3313                 u32 start;
3314                 unsigned int len = 0;
3315
3316                 would_hit_hwbug -= 1;
3317                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3318                 entry &= (TG3_TX_RING_SIZE - 1);
3319                 start = entry;
3320                 i = 0;
3321                 while (entry != last_plus_one) {
3322                         if (i == 0)
3323                                 len = skb_headlen(skb);
3324                         else
3325                                 len = skb_shinfo(skb)->frags[i-1].size;
3326
3327                         if (entry == would_hit_hwbug)
3328                                 break;
3329
3330                         i++;
3331                         entry = NEXT_TX(entry);
3332
3333                 }
3334
3335                 /* If the workaround fails due to memory/mapping
3336                  * failure, silently drop this packet.
3337                  */
3338                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3339                                                 entry, len,
3340                                                 last_plus_one,
3341                                                 &start, mss))
3342                         goto out_unlock;
3343
3344                 entry = start;
3345         }
3346
3347         /* Packets are ready, update Tx producer idx local and on card. */
3348         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3349
3350         tp->tx_prod = entry;
3351         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3352                 netif_stop_queue(dev);
3353
3354 out_unlock:
3355         mmiowb();
3356         spin_unlock_irqrestore(&tp->tx_lock, flags);
3357
3358         dev->trans_start = jiffies;
3359
3360         return NETDEV_TX_OK;
3361 }
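
/* Two details of the transmit path above: the single tw32_tx_mbox() write of
 * the new producer index is what hands the queued descriptors to the chip,
 * and the queue is stopped once TX_BUFFS_AVAIL() drops to MAX_SKB_FRAGS + 1
 * or fewer so a worst-case fragmented skb never finds the ring full while
 * the queue is awake.
 */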
3362
3363 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3364                                int new_mtu)
3365 {
3366         dev->mtu = new_mtu;
3367
3368         if (new_mtu > ETH_DATA_LEN)
3369                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3370         else
3371                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3372 }
3373
3374 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3375 {
3376         struct tg3 *tp = netdev_priv(dev);
3377
3378         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3379                 return -EINVAL;
3380
3381         if (!netif_running(dev)) {
3382                 /* We'll just catch it later when the
3383                  * device is brought up.
3384                  */
3385                 tg3_set_mtu(dev, tp, new_mtu);
3386                 return 0;
3387         }
3388
3389         tg3_netif_stop(tp);
3390         spin_lock_irq(&tp->lock);
3391         spin_lock(&tp->tx_lock);
3392
3393         tg3_halt(tp);
3394
3395         tg3_set_mtu(dev, tp, new_mtu);
3396
3397         tg3_init_hw(tp);
3398
3399         tg3_netif_start(tp);
3400
3401         spin_unlock(&tp->tx_lock);
3402         spin_unlock_irq(&tp->lock);
3403
3404         return 0;
3405 }
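
/* Changing the MTU on a running interface needs the full halt/re-init above
 * because TG3_FLAG_JUMBO_ENABLE, set from the new MTU, affects how the rx
 * rings are populated and how the hardware is programmed.
 */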
3406
3407 /* Free up pending packets in all rx/tx rings.
3408  *
3409  * The chip has been shut down and the driver detached from
3410  * the network stack, so no interrupts or new tx packets will
3411  * end up in the driver.  tp->{tx,}lock is not held and we are not
3412  * in an interrupt context and thus may sleep.
3413  */
3414 static void tg3_free_rings(struct tg3 *tp)
3415 {
3416         struct ring_info *rxp;
3417         int i;
3418
3419         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3420                 rxp = &tp->rx_std_buffers[i];
3421
3422                 if (rxp->skb == NULL)
3423                         continue;
3424                 pci_unmap_single(tp->pdev,
3425                                  pci_unmap_addr(rxp, mapping),
3426                                  RX_PKT_BUF_SZ - tp->rx_offset,
3427                                  PCI_DMA_FROMDEVICE);
3428                 dev_kfree_skb_any(rxp->skb);
3429                 rxp->skb = NULL;
3430         }
3431
3432         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3433                 rxp = &tp->rx_jumbo_buffers[i];
3434
3435                 if (rxp->skb == NULL)
3436                         continue;
3437                 pci_unmap_single(tp->pdev,
3438                                  pci_unmap_addr(rxp, mapping),
3439                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3440                                  PCI_DMA_FROMDEVICE);
3441                 dev_kfree_skb_any(rxp->skb);
3442                 rxp->skb = NULL;
3443         }
3444
3445         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3446                 struct tx_ring_info *txp;
3447                 struct sk_buff *skb;
3448                 int j;
3449
3450                 txp = &tp->tx_buffers[i];
3451                 skb = txp->skb;
3452
3453                 if (skb == NULL) {
3454                         i++;
3455                         continue;
3456                 }
3457
3458                 pci_unmap_single(tp->pdev,
3459                                  pci_unmap_addr(txp, mapping),
3460                                  skb_headlen(skb),
3461                                  PCI_DMA_TODEVICE);
3462                 txp->skb = NULL;
3463
3464                 i++;
3465
3466                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3467                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3468                         pci_unmap_page(tp->pdev,
3469                                        pci_unmap_addr(txp, mapping),
3470                                        skb_shinfo(skb)->frags[j].size,
3471                                        PCI_DMA_TODEVICE);
3472                         i++;
3473                 }
3474
3475                 dev_kfree_skb_any(skb);
3476         }
3477 }
3478
3479 /* Initialize tx/rx rings for packet processing.
3480  *
3481  * The chip has been shut down and the driver detached from
3482  * the network stack, so no interrupts or new tx packets will
3483  * end up in the driver.  tp->{tx,}lock are held and thus
3484  * we may not sleep.
3485  */
3486 static void tg3_init_rings(struct tg3 *tp)
3487 {
3488         u32 i;
3489
3490         /* Free up all the SKBs. */
3491         tg3_free_rings(tp);
3492
3493         /* Zero out all descriptors. */
3494         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3495         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3496         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3497         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3498
3499         /* Initialize invariants of the rings; we only set this
3500          * stuff once.  This works because the card does not
3501          * write into the rx buffer posting rings.
3502          */
3503         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3504                 struct tg3_rx_buffer_desc *rxd;
3505
3506                 rxd = &tp->rx_std[i];
3507                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3508                         << RXD_LEN_SHIFT;
3509                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3510                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3511                                (i << RXD_OPAQUE_INDEX_SHIFT));
3512         }
3513
3514         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3515                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3516                         struct tg3_rx_buffer_desc *rxd;
3517
3518                         rxd = &tp->rx_jumbo[i];
3519                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3520                                 << RXD_LEN_SHIFT;
3521                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3522                                 RXD_FLAG_JUMBO;
3523                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3524                                (i << RXD_OPAQUE_INDEX_SHIFT));
3525                 }
3526         }
3527
3528         /* Now allocate fresh SKBs for each rx ring. */
3529         for (i = 0; i < tp->rx_pending; i++) {
3530                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3531                                      -1, i) < 0)
3532                         break;
3533         }
3534
3535         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3536                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3537                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3538                                              -1, i) < 0)
3539                                 break;
3540                 }
3541         }
3542 }
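
/* The descriptor invariants (length, flags, opaque cookie) are written for
 * every slot of each ring in use, but fresh SKBs are only allocated for the
 * first rx_pending and rx_jumbo_pending entries, so the number of buffers
 * actually posted to the chip is governed by those tunables rather than the
 * full ring size.
 */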
3543
3544 /*
3545  * Must not be invoked with interrupt sources disabled and
3546  * the hardware shut down.
3547  */
3548 static void tg3_free_consistent(struct tg3 *tp)
3549 {
3550         if (tp->rx_std_buffers) {
3551                 kfree(tp->rx_std_buffers);
3552                 tp->rx_std_buffers = NULL;
3553         }
3554         if (tp->rx_std) {
3555                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3556                                     tp->rx_std, tp->rx_std_mapping);
3557                 tp->rx_std = NULL;
3558         }
3559         if (tp->rx_jumbo) {
3560                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3561                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3562                 tp->rx_jumbo = NULL;
3563         }
3564         if (tp->rx_rcb) {
3565                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3566                                     tp->rx_rcb, tp->rx_rcb_mapping);
3567                 tp->rx_rcb = NULL;
3568         }
3569         if (tp->tx_ring) {
3570                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3571                         tp->tx_ring, tp->tx_desc_mapping);
3572                 tp->tx_ring = NULL;
3573         }
3574         if (tp->hw_status) {
3575                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3576                                     tp->hw_status, tp->status_mapping);
3577                 tp->hw_status = NULL;
3578         }
3579         if (tp->hw_stats) {
3580                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3581                                     tp->hw_stats, tp->stats_mapping);
3582                 tp->hw_stats = NULL;
3583         }
3584 }
3585
3586 /*
3587  * Must not be invoked with interrupt sources disabled and
3588  * the hardware shut down.  Can sleep.
3589  */
3590 static int tg3_alloc_consistent(struct tg3 *tp)
3591 {
3592         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3593                                       (TG3_RX_RING_SIZE +
3594                                        TG3_RX_JUMBO_RING_SIZE)) +
3595                                      (sizeof(struct tx_ring_info) *
3596                                       TG3_TX_RING_SIZE),
3597                                      GFP_KERNEL);
3598         if (!tp->rx_std_buffers)
3599                 return -ENOMEM;
3600
3601         memset(tp->rx_std_buffers, 0,
3602                (sizeof(struct ring_info) *
3603                 (TG3_RX_RING_SIZE +
3604                  TG3_RX_JUMBO_RING_SIZE)) +
3605                (sizeof(struct tx_ring_info) *
3606                 TG3_TX_RING_SIZE));
3607
3608         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3609         tp->tx_buffers = (struct tx_ring_info *)
3610                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3611
3612         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3613                                           &tp->rx_std_mapping);
3614         if (!tp->rx_std)
3615                 goto err_out;
3616
3617         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3618                                             &tp->rx_jumbo_mapping);
3619
3620         if (!tp->rx_jumbo)
3621                 goto err_out;
3622
3623         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3624                                           &tp->rx_rcb_mapping);
3625         if (!tp->rx_rcb)
3626                 goto err_out;
3627
3628         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3629                                            &tp->tx_desc_mapping);
3630         if (!tp->tx_ring)
3631                 goto err_out;
3632
3633         tp->hw_status = pci_alloc_consistent(tp->pdev,
3634                                              TG3_HW_STATUS_SIZE,
3635                                              &tp->status_mapping);
3636         if (!tp->hw_status)
3637                 goto err_out;
3638
3639         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3640                                             sizeof(struct tg3_hw_stats),
3641                                             &tp->stats_mapping);
3642         if (!tp->hw_stats)
3643                 goto err_out;
3644
3645         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3646         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3647
3648         return 0;
3649
3650 err_out:
3651         tg3_free_consistent(tp);
3652         return -ENOMEM;
3653 }
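
/* Memory layout established above: one kmalloc'd block holds the software
 * state for all three rings (rx_std_buffers, then rx_jumbo_buffers, then
 * tx_buffers, carved out by pointer arithmetic), while each hardware ring,
 * the status block and the statistics block get their own
 * pci_alloc_consistent() DMA areas.
 */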
3654
3655 #define MAX_WAIT_CNT 1000
3656
3657 /* To stop a block, clear the enable bit and poll till it
3658  * clears.  tp->lock is held.
3659  */
3660 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3661 {
3662         unsigned int i;
3663         u32 val;
3664
3665         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3666                 switch (ofs) {
3667                 case RCVLSC_MODE:
3668                 case DMAC_MODE:
3669                 case MBFREE_MODE:
3670                 case BUFMGR_MODE:
3671                 case MEMARB_MODE:
3672                         /* We can't enable/disable these bits of the
3673                          * 5705/5750, just say success.
3674                          */
3675                         return 0;
3676
3677                 default:
3678                         break;
3679                 };
3680         }
3681
3682         val = tr32(ofs);
3683         val &= ~enable_bit;
3684         tw32_f(ofs, val);
3685
3686         for (i = 0; i < MAX_WAIT_CNT; i++) {
3687                 udelay(100);
3688                 val = tr32(ofs);
3689                 if ((val & enable_bit) == 0)
3690                         break;
3691         }
3692
3693         if (i == MAX_WAIT_CNT) {
3694                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3695                        "ofs=%lx enable_bit=%x\n",
3696                        ofs, enable_bit);
3697                 return -ENODEV;
3698         }
3699
3700         return 0;
3701 }
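
/* Worst case, the poll above waits MAX_WAIT_CNT * 100us = 100ms for the
 * enable bit to clear before giving up with -ENODEV.
 */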
3702
3703 /* tp->lock is held. */
3704 static int tg3_abort_hw(struct tg3 *tp)
3705 {
3706         int i, err;
3707
3708         tg3_disable_ints(tp);
3709
3710         tp->rx_mode &= ~RX_MODE_ENABLE;
3711         tw32_f(MAC_RX_MODE, tp->rx_mode);
3712         udelay(10);
3713
3714         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3715         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3716         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3717         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3718         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3719         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3720
3721         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3722         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3723         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3724         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3725         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3726         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3727         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3728         if (err)
3729                 goto out;
3730
3731         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3732         tw32_f(MAC_MODE, tp->mac_mode);
3733         udelay(40);
3734
3735         tp->tx_mode &= ~TX_MODE_ENABLE;
3736         tw32_f(MAC_TX_MODE, tp->tx_mode);
3737
3738         for (i = 0; i < MAX_WAIT_CNT; i++) {
3739                 udelay(100);
3740                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3741                         break;
3742         }
3743         if (i >= MAX_WAIT_CNT) {
3744                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3745                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3746                        tp->dev->name, tr32(MAC_TX_MODE));
3747                 return -ENODEV;
3748         }
3749
3750         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3751         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3752         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3753
3754         tw32(FTQ_RESET, 0xffffffff);
3755         tw32(FTQ_RESET, 0x00000000);
3756
3757         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3758         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3759         if (err)
3760                 goto out;
3761
3762         if (tp->hw_status)
3763                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3764         if (tp->hw_stats)
3765                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3766
3767 out:
3768         return err;
3769 }
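
/* The shutdown above proceeds roughly along the data path: the receive MAC
 * and rx blocks first, then the send blocks and transmit MAC, and finally
 * host coalescing, write DMA, the FTQs, the buffer manager and the memory
 * arbiter.
 */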
3770
3771 /* tp->lock is held. */
3772 static int tg3_nvram_lock(struct tg3 *tp)
3773 {
3774         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3775                 int i;
3776
3777                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3778                 for (i = 0; i < 8000; i++) {
3779                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3780                                 break;
3781                         udelay(20);
3782                 }
3783                 if (i == 8000)
3784                         return -ENODEV;
3785         }
3786         return 0;
3787 }
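
/* NVRAM access is arbitrated through the NVRAM_SWARB register: the lock
 * above requests SWARB_REQ_SET1 and then polls for SWARB_GNT1 for up to
 * 8000 * 20us = 160ms before failing with -ENODEV.
 */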
3788
3789 /* tp->lock is held. */
3790 static void tg3_nvram_unlock(struct tg3 *tp)
3791 {
3792         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3793                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3794 }
3795
3796 /* tp->lock is held. */
3797 static void tg3_enable_nvram_access(struct tg3 *tp)
3798 {
3799         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3800             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3801                 u32 nvaccess = tr32(NVRAM_ACCESS);
3802
3803                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3804         }
3805 }
3806
3807 /* tp->lock is held. */
3808 static void tg3_disable_nvram_access(struct tg3 *tp)
3809 {
3810         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3811             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3812                 u32 nvaccess = tr32(NVRAM_ACCESS);
3813
3814                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3815         }
3816 }
3817
3818 /* tp->lock is held. */
3819 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3820 {
3821         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3822                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3823                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3824
3825         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3826                 switch (kind) {
3827                 case RESET_KIND_INIT:
3828                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3829                                       DRV_STATE_START);
3830                         break;
3831
3832                 case RESET_KIND_SHUTDOWN:
3833                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3834                                       DRV_STATE_UNLOAD);
3835                         break;
3836
3837                 case RESET_KIND_SUSPEND:
3838                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3839                                       DRV_STATE_SUSPEND);
3840                         break;
3841
3842                 default:
3843                         break;
3844                 };
3845         }
3846 }
3847
3848 /* tp->lock is held. */
3849 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3850 {
3851         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3852                 switch (kind) {
3853                 case RESET_KIND_INIT:
3854                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3855                                       DRV_STATE_START_DONE);
3856                         break;
3857
3858                 case RESET_KIND_SHUTDOWN:
3859                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3860                                       DRV_STATE_UNLOAD_DONE);
3861                         break;
3862
3863                 default:
3864                         break;
3865                 };
3866         }
3867 }
3868
3869 /* tp->lock is held. */
3870 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3871 {
3872         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3873                 switch (kind) {
3874                 case RESET_KIND_INIT:
3875                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3876                                       DRV_STATE_START);
3877                         break;
3878
3879                 case RESET_KIND_SHUTDOWN:
3880                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3881                                       DRV_STATE_UNLOAD);
3882                         break;
3883
3884                 case RESET_KIND_SUSPEND:
3885                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3886                                       DRV_STATE_SUSPEND);
3887                         break;
3888
3889                 default:
3890                         break;
3891                 };
3892         }
3893 }
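
/* The three tg3_write_sig_*() helpers above report driver state transitions
 * (start, unload, suspend) to the ASF/management firmware by writing
 * well-known DRV_STATE_* values into NIC SRAM mailboxes; which handshake is
 * used depends on TG3_FLG2_ASF_NEW_HANDSHAKE versus the legacy
 * TG3_FLAG_ENABLE_ASF path.
 */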
3894
3895 static void tg3_stop_fw(struct tg3 *);
3896
3897 /* tp->lock is held. */
3898 static int tg3_chip_reset(struct tg3 *tp)
3899 {
3900         u32 val;
3901         u32 flags_save;
3902         int i;
3903
3904         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3905                 tg3_nvram_lock(tp);
3906
3907         /*
3908          * We must avoid the readl() that normally takes place.
3909          * It locks machines, causes machine checks, and other
3910          * fun things.  So, temporarily disable the 5701
3911          * hardware workaround, while we do the reset.
3912          */
3913         flags_save = tp->tg3_flags;
3914         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3915
3916         /* do the reset */
3917         val = GRC_MISC_CFG_CORECLK_RESET;
3918
3919         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3920                 if (tr32(0x7e2c) == 0x60) {
3921                         tw32(0x7e2c, 0x20);
3922                 }
3923                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3924                         tw32(GRC_MISC_CFG, (1 << 29));
3925                         val |= (1 << 29);
3926                 }
3927         }
3928
3929         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3930                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3931         tw32(GRC_MISC_CFG, val);
3932
3933         /* restore 5701 hardware bug workaround flag */
3934         tp->tg3_flags = flags_save;
3935
3936         /* Unfortunately, we have to delay before the PCI read back.
3937          * Some 575X chips will not even respond to a PCI cfg access
3938          * when the reset command is given to the chip.
3939          *
3940          * How do these hardware designers expect things to work
3941          * properly if the PCI write is posted for a long period
3942          * of time?  It is always necessary to have some method by
3943          * which a register read back can occur to push the write
3944          * out which does the reset.
3945          *
3946          * For most tg3 variants the trick below was working.
3947          * Ho hum...
3948          */
3949         udelay(120);
3950
3951         /* Flush PCI posted writes.  The normal MMIO registers
3952          * are inaccessible at this time so this is the only
3953          * way to do this reliably (actually, this is no longer
3954          * the case, see above).  I tried to use indirect
3955          * register read/write but this upset some 5701 variants.
3956          */
3957         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3958
3959         udelay(120);
3960
3961         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3962                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3963                         int i;
3964                         u32 cfg_val;
3965
3966                         /* Wait for link training to complete.  */
3967                         for (i = 0; i < 5000; i++)
3968                                 udelay(100);
3969
3970                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3971                         pci_write_config_dword(tp->pdev, 0xc4,
3972                                                cfg_val | (1 << 15));
3973                 }
3974                 /* Set PCIE max payload size and clear error status.  */
3975                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3976         }
3977
3978         /* Re-enable indirect register accesses. */
3979         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3980                                tp->misc_host_ctrl);
3981
3982         /* Set MAX PCI retry to zero. */
3983         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3984         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3985             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3986                 val |= PCISTATE_RETRY_SAME_DMA;
3987         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3988
3989         pci_restore_state(tp->pdev);
3990
3991         /* Make sure PCI-X relaxed ordering bit is clear. */
3992         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3993         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3994         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3995
3996         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3997
3998         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3999                 tg3_stop_fw(tp);
4000                 tw32(0x5000, 0x400);
4001         }
4002
4003         tw32(GRC_MODE, tp->grc_mode);
4004
4005         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4006                 u32 val = tr32(0xc4);
4007
4008                 tw32(0xc4, val | (1 << 15));
4009         }
4010
4011         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4012             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4013                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4014                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4015                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4016                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4017         }
4018
4019         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4020                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4021                 tw32_f(MAC_MODE, tp->mac_mode);
4022         } else
4023                 tw32_f(MAC_MODE, 0);
4024         udelay(40);
4025
4026         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4027                 /* Wait for firmware initialization to complete. */
4028                 for (i = 0; i < 100000; i++) {
4029                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4030                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4031                                 break;
4032                         udelay(10);
4033                 }
4034                 if (i >= 100000) {
4035                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4036                                "firmware will not restart magic=%08x\n",
4037                                tp->dev->name, val);
4038                         return -ENODEV;
4039                 }
4040         }
4041
4042         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4043             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4044                 u32 val = tr32(0x7c00);
4045
4046                 tw32(0x7c00, val | (1 << 25));
4047         }
4048
4049         /* Reprobe ASF enable state.  */
4050         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4051         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4052         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4053         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4054                 u32 nic_cfg;
4055
4056                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4057                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4058                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4059                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4060                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4061                 }
4062         }
4063
4064         return 0;
4065 }
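
/* Rough shape of the reset above: take the NVRAM lock, issue the core-clock
 * reset through GRC_MISC_CFG (with the 5701 register-write workaround
 * temporarily disabled so no readl() sneaks in), wait out the window in
 * which the chip ignores PCI accesses, restore PCI config space and the
 * memory arbiter, then poll the firmware mailbox until the bootcode signals
 * completion by writing back the one's complement of the magic value, and
 * finally re-probe the ASF state from NIC SRAM.
 */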
4066
4067 /* tp->lock is held. */
4068 static void tg3_stop_fw(struct tg3 *tp)
4069 {
4070         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4071                 u32 val;
4072                 int i;
4073
4074                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4075                 val = tr32(GRC_RX_CPU_EVENT);
4076                 val |= (1 << 14);
4077                 tw32(GRC_RX_CPU_EVENT, val);
4078
4079                 /* Wait for RX CPU to ACK the event.  */
4080                 for (i = 0; i < 100; i++) {
4081                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4082                                 break;
4083                         udelay(1);
4084                 }
4085         }
4086 }
4087
4088 /* tp->lock is held. */
4089 static int tg3_halt(struct tg3 *tp)
4090 {
4091         int err;
4092
4093         tg3_stop_fw(tp);
4094
4095         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4096
4097         tg3_abort_hw(tp);
4098         err = tg3_chip_reset(tp);
4099
4100         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4101         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4102
4103         if (err)
4104                 return err;
4105
4106         return 0;
4107 }
4108
4109 #define TG3_FW_RELEASE_MAJOR    0x0
4110 #define TG3_FW_RELASE_MINOR     0x0
4111 #define TG3_FW_RELEASE_FIX      0x0
4112 #define TG3_FW_START_ADDR       0x08000000
4113 #define TG3_FW_TEXT_ADDR        0x08000000
4114 #define TG3_FW_TEXT_LEN         0x9c0
4115 #define TG3_FW_RODATA_ADDR      0x080009c0
4116 #define TG3_FW_RODATA_LEN       0x60
4117 #define TG3_FW_DATA_ADDR        0x08000a40
4118 #define TG3_FW_DATA_LEN         0x20
4119 #define TG3_FW_SBSS_ADDR        0x08000a60
4120 #define TG3_FW_SBSS_LEN         0xc
4121 #define TG3_FW_BSS_ADDR         0x08000a70
4122 #define TG3_FW_BSS_LEN          0x10
4123
4124 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4125         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4126         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4127         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4128         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4129         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4130         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4131         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4132         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4133         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4134         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4135         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4136         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4137         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4138         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4139         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4140         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4141         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4142         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4143         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4144         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4145         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4146         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4147         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4148         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4149         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4150         0, 0, 0, 0, 0, 0,
4151         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4152         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4153         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4154         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4155         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4156         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4157         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4158         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4159         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4160         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4161         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4162         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4163         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4164         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4165         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4166         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4167         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4168         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4169         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4170         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4171         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4172         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4173         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4174         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4175         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4176         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4177         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4178         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4179         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4180         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4181         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4182         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4183         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4184         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4185         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4186         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4187         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4188         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4189         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4190         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4191         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4192         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4193         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4194         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4195         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4196         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4197         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4198         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4199         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4200         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4201         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4202         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4203         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4204         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4205         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4206         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4207         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4208         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4209         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4210         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4211         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4212         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4213         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4214         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4215         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4216 };
4217
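     /* Read-only data for the fix-up firmware; the words are ASCII string
      * constants (e.g. "5701", "SwEvent0", "fatalErr") referenced by the
      * firmware text above.
      */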
4218 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4219         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4220         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4221         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4222         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4223         0x00000000
4224 };
4225
4226 #if 0 /* All zeros, don't eat up space with it. */
4227 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4228         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4229         0x00000000, 0x00000000, 0x00000000, 0x00000000
4230 };
4231 #endif
4232
4233 #define RX_CPU_SCRATCH_BASE     0x30000
4234 #define RX_CPU_SCRATCH_SIZE     0x04000
4235 #define TX_CPU_SCRATCH_BASE     0x34000
4236 #define TX_CPU_SCRATCH_SIZE     0x04000
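     /* 16 KB on-chip scratch windows into which the RX and TX CPU firmware
      * images are copied by tg3_load_firmware_cpu() below.
      */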
4237
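     /* Halt the embedded CPU selected by @offset (RX_CPU_BASE or
      * TX_CPU_BASE) by asserting CPU_MODE_HALT until the mode register
      * reflects the halt.  5705-class chips have no TX CPU, hence the
      * BUG() check below.
      */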
4238 /* tp->lock is held. */
4239 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4240 {
4241         int i;
4242
4243         if (offset == TX_CPU_BASE &&
4244             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4245                 BUG();
4246
4247         if (offset == RX_CPU_BASE) {
4248                 for (i = 0; i < 10000; i++) {
4249                         tw32(offset + CPU_STATE, 0xffffffff);
4250                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4251                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4252                                 break;
4253                 }
4254
4255                 tw32(offset + CPU_STATE, 0xffffffff);
4256                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4257                 udelay(10);
4258         } else {
4259                 for (i = 0; i < 10000; i++) {
4260                         tw32(offset + CPU_STATE, 0xffffffff);
4261                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4262                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4263                                 break;
4264                 }
4265         }
4266
4267         if (i >= 10000) {
4268                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4269                        "%s CPU\n",
4270                        tp->dev->name,
4271                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4272                 return -ENODEV;
4273         }
4274         return 0;
4275 }
4276
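     /* Describes one firmware image: for each section (text, rodata, data)
      * the address it occupies in the CPU's address space, its length, and
      * a pointer to the host copy.  A NULL pointer means the region is
      * zero-filled.
      */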
4277 struct fw_info {
4278         unsigned int text_base;
4279         unsigned int text_len;
4280         u32 *text_data;
4281         unsigned int rodata_base;
4282         unsigned int rodata_len;
4283         u32 *rodata_data;
4284         unsigned int data_base;
4285         unsigned int data_len;
4286         u32 *data_data;
4287 };
4288
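     /* Halt the target CPU, zero its scratch area and copy each section of
      * @info into scratch memory.  Register writes are forced through the
      * indirect/PCI-config path for the duration of the load (see the
      * TG3_FLAG_PCIX_TARGET_HWBUG override below).
      */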
4289 /* tp->lock is held. */
4290 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4291                                  int cpu_scratch_size, struct fw_info *info)
4292 {
4293         int err, i;
4294         u32 orig_tg3_flags = tp->tg3_flags;
4295         void (*write_op)(struct tg3 *, u32, u32);
4296
4297         if (cpu_base == TX_CPU_BASE &&
4298             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4299                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4300                        "TX cpu firmware on %s which is 5705 or later.\n",
4301                        tp->dev->name);
4302                 return -EINVAL;
4303         }
4304
4305         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4306                 write_op = tg3_write_mem;
4307         else
4308                 write_op = tg3_write_indirect_reg32;
4309
4310         /* Force use of PCI config space for indirect register
4311          * write calls.
4312          */
4313         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4314
4315         err = tg3_halt_cpu(tp, cpu_base);
4316         if (err)
4317                 goto out;
4318
4319         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4320                 write_op(tp, cpu_scratch_base + i, 0);
4321         tw32(cpu_base + CPU_STATE, 0xffffffff);
4322         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4323         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4324                 write_op(tp, (cpu_scratch_base +
4325                               (info->text_base & 0xffff) +
4326                               (i * sizeof(u32))),
4327                          (info->text_data ?
4328                           info->text_data[i] : 0));
4329         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4330                 write_op(tp, (cpu_scratch_base +
4331                               (info->rodata_base & 0xffff) +
4332                               (i * sizeof(u32))),
4333                          (info->rodata_data ?
4334                           info->rodata_data[i] : 0));
4335         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4336                 write_op(tp, (cpu_scratch_base +
4337                               (info->data_base & 0xffff) +
4338                               (i * sizeof(u32))),
4339                          (info->data_data ?
4340                           info->data_data[i] : 0));
4341
4342         err = 0;
4343
4344 out:
4345         tp->tg3_flags = orig_tg3_flags;
4346         return err;
4347 }
4348
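     /* Load the 5701_A0 fix-up firmware (tg3FwText/tg3FwRodata) into both
      * the RX and TX CPU scratch areas, then release only the RX CPU and
      * verify that its program counter latched the firmware entry point.
      */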
4349 /* tp->lock is held. */
4350 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4351 {
4352         struct fw_info info;
4353         int err, i;
4354
4355         info.text_base = TG3_FW_TEXT_ADDR;
4356         info.text_len = TG3_FW_TEXT_LEN;
4357         info.text_data = &tg3FwText[0];
4358         info.rodata_base = TG3_FW_RODATA_ADDR;
4359         info.rodata_len = TG3_FW_RODATA_LEN;
4360         info.rodata_data = &tg3FwRodata[0];
4361         info.data_base = TG3_FW_DATA_ADDR;
4362         info.data_len = TG3_FW_DATA_LEN;
4363         info.data_data = NULL;
4364
4365         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4366                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4367                                     &info);
4368         if (err)
4369                 return err;
4370
4371         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4372                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4373                                     &info);
4374         if (err)
4375                 return err;
4376
4377         /* Now start up only the RX CPU. */
4378         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4379         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4380
4381         for (i = 0; i < 5; i++) {
4382                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4383                         break;
4384                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4385                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4386                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4387                 udelay(1000);
4388         }
4389         if (i >= 5) {
4390                 printk(KERN_ERR PFX "tg3_load_firmware: %s failed to set "
4391                        "RX CPU PC, is %08x, should be %08x\n",
4392                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4393                        TG3_FW_TEXT_ADDR);
4394                 return -ENODEV;
4395         }
4396         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4397         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4398
4399         return 0;
4400 }
4401
4402 #if TG3_TSO_SUPPORT != 0
4403
4404 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4405 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4406 #define TG3_TSO_FW_RELEASE_FIX          0x0
4407 #define TG3_TSO_FW_START_ADDR           0x08000000
4408 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4409 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4410 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4411 #define TG3_TSO_FW_RODATA_LEN           0x60
4412 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4413 #define TG3_TSO_FW_DATA_LEN             0x30
4414 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4415 #define TG3_TSO_FW_SBSS_LEN             0x2c
4416 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4417 #define TG3_TSO_FW_BSS_LEN              0x894
4418
4419 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4420         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4421         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4422         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4423         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4424         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4425         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4426         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4427         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4428         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4429         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4430         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4431         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4432         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4433         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4434         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4435         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4436         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4437         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4438         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4439         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4440         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4441         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4442         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4443         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4444         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4445         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4446         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4447         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4448         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4449         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4450         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4451         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4452         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4453         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4454         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4455         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4456         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4457         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4458         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4459         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4460         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4461         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4462         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4463         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4464         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4465         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4466         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4467         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4468         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4469         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4470         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4471         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4472         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4473         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4474         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4475         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4476         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4477         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4478         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4479         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4480         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4481         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4482         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4483         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4484         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4485         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4486         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4487         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4488         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4489         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4490         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4491         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4492         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4493         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4494         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4495         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4496         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4497         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4498         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4499         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4500         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4501         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4502         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4503         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4504         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4505         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4506         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4507         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4508         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4509         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4510         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4511         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4512         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4513         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4514         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4515         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4516         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4517         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4518         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4519         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4520         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4521         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4522         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4523         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4524         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4525         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4526         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4527         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4528         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4529         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4530         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4531         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4532         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4533         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4534         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4535         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4536         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4537         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4538         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4539         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4540         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4541         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4542         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4543         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4544         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4545         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4546         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4547         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4548         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4549         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4550         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4551         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4552         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4553         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4554         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4555         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4556         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4557         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4558         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4559         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4560         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4561         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4562         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4563         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4564         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4565         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4566         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4567         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4568         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4569         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4570         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4571         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4572         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4573         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4574         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4575         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4576         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4577         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4578         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4579         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4580         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4581         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4582         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4583         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4584         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4585         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4586         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4587         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4588         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4589         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4590         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4591         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4592         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4593         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4594         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4595         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4596         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4597         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4598         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4599         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4600         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4601         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4602         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4603         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4604         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4605         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4606         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4607         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4608         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4609         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4610         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4611         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4612         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4613         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4614         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4615         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4616         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4617         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4618         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4619         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4620         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4621         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4622         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4623         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4624         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4625         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4626         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4627         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4628         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4629         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4630         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4631         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4632         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4633         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4634         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4635         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4636         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4637         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4638         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4639         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4640         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4641         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4642         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4643         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4644         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4645         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4646         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4647         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4648         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4649         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4650         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4651         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4652         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4653         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4654         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4655         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4656         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4657         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4658         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4659         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4660         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4661         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4662         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4663         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4664         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4665         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4666         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4667         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4668         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4669         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4670         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4671         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4672         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4673         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4674         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4675         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4676         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4677         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4678         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4679         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4680         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4681         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4682         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4683         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4684         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4685         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4686         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4687         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4688         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4689         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4690         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4691         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4692         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4693         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4694         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4695         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4696         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4697         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4698         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4699         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4700         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4701         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4702         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4703         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4704 };
4705
4706 static u32 tg3TsoFwRodata[] = {
4707         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4708         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4709         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4710         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4711         0x00000000,
4712 };
4713
4714 static u32 tg3TsoFwData[] = {
4715         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4716         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4717         0x00000000,
4718 };
4719
4720 /* 5705 needs a special version of the TSO firmware.  */
4721 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4722 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4723 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4724 #define TG3_TSO5_FW_START_ADDR          0x00010000
4725 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4726 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4727 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4728 #define TG3_TSO5_FW_RODATA_LEN          0x50
4729 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4730 #define TG3_TSO5_FW_DATA_LEN            0x20
4731 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4732 #define TG3_TSO5_FW_SBSS_LEN            0x28
4733 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4734 #define TG3_TSO5_FW_BSS_LEN             0x88
4735
4736 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4737         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4738         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4739         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4740         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4741         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4742         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4743         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4744         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4745         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4746         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4747         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4748         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4749         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4750         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4751         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4752         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4753         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4754         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4755         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4756         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4757         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4758         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4759         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4760         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4761         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4762         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4763         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4764         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4765         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4766         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4767         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4768         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4769         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4770         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4771         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4772         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4773         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4774         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4775         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4776         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4777         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4778         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4779         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4780         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4781         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4782         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4783         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4784         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4785         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4786         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4787         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4788         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4789         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4790         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4791         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4792         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4793         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4794         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4795         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4796         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4797         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4798         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4799         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4800         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4801         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4802         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4803         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4804         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4805         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4806         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4807         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4808         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4809         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4810         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4811         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4812         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4813         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4814         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4815         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4816         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4817         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4818         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4819         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4820         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4821         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4822         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4823         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4824         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4825         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4826         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4827         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4828         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4829         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4830         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4831         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4832         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4833         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4834         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4835         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4836         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4837         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4838         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4839         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4840         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4841         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4842         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4843         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4844         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4845         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4846         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4847         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4848         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4849         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4850         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4851         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4852         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4853         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4854         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4855         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4856         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4857         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4858         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4859         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4860         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4861         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4862         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4863         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4864         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4865         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4866         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4867         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4868         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4869         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4870         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4871         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4872         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4873         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4874         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4875         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4876         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4877         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4878         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4879         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4880         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4881         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4882         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4883         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4884         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4885         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4886         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4887         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4888         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4889         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4890         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4891         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4892         0x00000000, 0x00000000, 0x00000000,
4893 };
4894
4895 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4896         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4897         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4898         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4899         0x00000000, 0x00000000, 0x00000000,
4900 };
4901
4902 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4903         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4904         0x00000000, 0x00000000, 0x00000000,
4905 };
4906
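     /* Load the TSO helper firmware.  Chips with TSO in hardware
      * (TG3_FLG2_HW_TSO) need nothing; 5705-class chips get the smaller
      * tg3Tso5Fw image on the RX CPU (using mbuf pool space as scratch),
      * everything else gets tg3TsoFw on the TX CPU.  The CPU is then
      * released from halt and its program counter verified.
      */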
4907 /* tp->lock is held. */
4908 static int tg3_load_tso_firmware(struct tg3 *tp)
4909 {
4910         struct fw_info info;
4911         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4912         int err, i;
4913
4914         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4915                 return 0;
4916
4917         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4918                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4919                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4920                 info.text_data = &tg3Tso5FwText[0];
4921                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4922                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4923                 info.rodata_data = &tg3Tso5FwRodata[0];
4924                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4925                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4926                 info.data_data = &tg3Tso5FwData[0];
4927                 cpu_base = RX_CPU_BASE;
4928                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4929                 cpu_scratch_size = (info.text_len +
4930                                     info.rodata_len +
4931                                     info.data_len +
4932                                     TG3_TSO5_FW_SBSS_LEN +
4933                                     TG3_TSO5_FW_BSS_LEN);
4934         } else {
4935                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4936                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4937                 info.text_data = &tg3TsoFwText[0];
4938                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4939                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4940                 info.rodata_data = &tg3TsoFwRodata[0];
4941                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4942                 info.data_len = TG3_TSO_FW_DATA_LEN;
4943                 info.data_data = &tg3TsoFwData[0];
4944                 cpu_base = TX_CPU_BASE;
4945                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4946                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4947         }
4948
4949         err = tg3_load_firmware_cpu(tp, cpu_base,
4950                                     cpu_scratch_base, cpu_scratch_size,
4951                                     &info);
4952         if (err)
4953                 return err;
4954
4955         /* Now start up the CPU. */
4956         tw32(cpu_base + CPU_STATE, 0xffffffff);
4957         tw32_f(cpu_base + CPU_PC,    info.text_base);
4958
4959         for (i = 0; i < 5; i++) {
4960                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4961                         break;
4962                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4963                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4964                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4965                 udelay(1000);
4966         }
4967         if (i >= 5) {
4968                 printk(KERN_ERR PFX "tg3_load_tso_firmware: %s failed to "
4969                        "set CPU PC, is %08x, should be %08x\n",
4970                        tp->dev->name, tr32(cpu_base + CPU_PC),
4971                        info.text_base);
4972                 return -ENODEV;
4973         }
4974         tw32(cpu_base + CPU_STATE, 0xffffffff);
4975         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4976         return 0;
4977 }
4978
4979 #endif /* TG3_TSO_SUPPORT != 0 */
4980
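     /* Program the station address into the MAC.  The address is written to
      * all four MAC_ADDR slots (plus the twelve extended slots on 5703/5704)
      * and its byte sum seeds the transmit backoff generator.
      */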
4981 /* tp->lock is held. */
4982 static void __tg3_set_mac_addr(struct tg3 *tp)
4983 {
4984         u32 addr_high, addr_low;
4985         int i;
4986
4987         addr_high = ((tp->dev->dev_addr[0] << 8) |
4988                      tp->dev->dev_addr[1]);
4989         addr_low = ((tp->dev->dev_addr[2] << 24) |
4990                     (tp->dev->dev_addr[3] << 16) |
4991                     (tp->dev->dev_addr[4] <<  8) |
4992                     (tp->dev->dev_addr[5] <<  0));
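             /* Illustrative example (hypothetical address 00:10:18:2a:3b:4c):
              * addr_high = 0x00000010, addr_low = 0x182a3b4c.
              */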
4993         for (i = 0; i < 4; i++) {
4994                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4995                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4996         }
4997
4998         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4999             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5000                 for (i = 0; i < 12; i++) {
5001                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5002                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5003                 }
5004         }
5005
5006         addr_high = (tp->dev->dev_addr[0] +
5007                      tp->dev->dev_addr[1] +
5008                      tp->dev->dev_addr[2] +
5009                      tp->dev->dev_addr[3] +
5010                      tp->dev->dev_addr[4] +
5011                      tp->dev->dev_addr[5]) &
5012                 TX_BACKOFF_SEED_MASK;
5013         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5014 }
5015
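     /* Driver hook for changing the interface MAC address (presumably wired
      * up as dev->set_mac_address at probe time): copy the new address and
      * reprogram the MAC registers under tp->lock.
      */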
5016 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5017 {
5018         struct tg3 *tp = netdev_priv(dev);
5019         struct sockaddr *addr = p;
5020
5021         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5022
5023         spin_lock_irq(&tp->lock);
5024         __tg3_set_mac_addr(tp);
5025         spin_unlock_irq(&tp->lock);
5026
5027         return 0;
5028 }
5029
5030 /* tp->lock is held. */
5031 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5032                            dma_addr_t mapping, u32 maxlen_flags,
5033                            u32 nic_addr)
5034 {
5035         tg3_write_mem(tp,
5036                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5037                       ((u64) mapping >> 32));
5038         tg3_write_mem(tp,
5039                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5040                       ((u64) mapping & 0xffffffff));
5041         tg3_write_mem(tp,
5042                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5043                        maxlen_flags);
5044
5045         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5046                 tg3_write_mem(tp,
5047                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5048                               nic_addr);
5049 }
5050
5051 static void __tg3_set_rx_mode(struct net_device *);
5052
5053 /* tp->lock is held. */
5054 static int tg3_reset_hw(struct tg3 *tp)
5055 {
5056         u32 val, rdmac_mode;
5057         int i, err, limit;
5058
5059         tg3_disable_ints(tp);
5060
5061         tg3_stop_fw(tp);
5062
5063         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5064
5065         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5066                 err = tg3_abort_hw(tp);
5067                 if (err)
5068                         return err;
5069         }
5070
5071         err = tg3_chip_reset(tp);
5072         if (err)
5073                 return err;
5074
5075         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5076
5077         /* This works around an issue with Athlon chipsets on
5078          * B3 tigon3 silicon.  This bit has no effect on any
5079          * other revision.  But do not set this on PCI Express
5080          * chips.
5081          */
5082         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5083                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5084         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5085
5086         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5087             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5088                 val = tr32(TG3PCI_PCISTATE);
5089                 val |= PCISTATE_RETRY_SAME_DMA;
5090                 tw32(TG3PCI_PCISTATE, val);
5091         }
5092
5093         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5094                 /* Enable some hw fixes.  */
5095                 val = tr32(TG3PCI_MSI_DATA);
5096                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5097                 tw32(TG3PCI_MSI_DATA, val);
5098         }
5099
5100         /* Descriptor ring init may make accesses to the
5101          * NIC SRAM area to set up the TX descriptors, so we
5102          * can only do this after the hardware has been
5103          * successfully reset.
5104          */
5105         tg3_init_rings(tp);
5106
5107         /* This value is determined during the probe time DMA
5108          * engine test, tg3_test_dma.
5109          */
5110         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5111
5112         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5113                           GRC_MODE_4X_NIC_SEND_RINGS |
5114                           GRC_MODE_NO_TX_PHDR_CSUM |
5115                           GRC_MODE_NO_RX_PHDR_CSUM);
5116         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5117         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5118                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5119         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5120                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5121
5122         tw32(GRC_MODE,
5123              tp->grc_mode |
5124              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5125
5126         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
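             /* A prescaler of 65 presumably selects divide-by-66, i.e. a 1 usec tick. */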
5127         val = tr32(GRC_MISC_CFG);
5128         val &= ~0xff;
5129         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5130         tw32(GRC_MISC_CFG, val);
5131
5132         /* Initialize MBUF/DESC pool. */
5133         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5134                 /* Do nothing.  */
5135         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5136                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5137                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5138                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5139                 else
5140                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5141                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5142                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5143         }
5144 #if TG3_TSO_SUPPORT != 0
5145         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5146                 int fw_len;
5147
5148                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5149                           TG3_TSO5_FW_RODATA_LEN +
5150                           TG3_TSO5_FW_DATA_LEN +
5151                           TG3_TSO5_FW_SBSS_LEN +
5152                           TG3_TSO5_FW_BSS_LEN);
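                     /* Round the firmware footprint up to a 128-byte boundary;
                      * the mbuf pool is carved out of the SRAM right behind it.
                      */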
5153                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5154                 tw32(BUFMGR_MB_POOL_ADDR,
5155                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5156                 tw32(BUFMGR_MB_POOL_SIZE,
5157                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5158         }
5159 #endif
5160
5161         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5162                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5163                      tp->bufmgr_config.mbuf_read_dma_low_water);
5164                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5165                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5166                 tw32(BUFMGR_MB_HIGH_WATER,
5167                      tp->bufmgr_config.mbuf_high_water);
5168         } else {
5169                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5170                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5171                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5172                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5173                 tw32(BUFMGR_MB_HIGH_WATER,
5174                      tp->bufmgr_config.mbuf_high_water_jumbo);
5175         }
5176         tw32(BUFMGR_DMA_LOW_WATER,
5177              tp->bufmgr_config.dma_low_water);
5178         tw32(BUFMGR_DMA_HIGH_WATER,
5179              tp->bufmgr_config.dma_high_water);
5180
5181         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
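             /* Poll up to 20 ms (2000 * 10 usec) for the buffer manager to come up. */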
5182         for (i = 0; i < 2000; i++) {
5183                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5184                         break;
5185                 udelay(10);
5186         }
5187         if (i >= 2000) {
5188                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5189                        tp->dev->name);
5190                 return -ENODEV;
5191         }
5192
5193         /* Setup replenish threshold. */
5194         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5195
5196         /* Initialize TG3_BDINFO's at:
5197          *  RCVDBDI_STD_BD:     standard eth size rx ring
5198          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5199          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5200          *
5201          * like so:
5202          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5203          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5204          *                              ring attribute flags
5205          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5206          *
5207          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5208          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5209          *
5210          * The size of each ring is fixed in the firmware, but the location is
5211          * configurable.
5212          */
5213         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5214              ((u64) tp->rx_std_mapping >> 32));
5215         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5216              ((u64) tp->rx_std_mapping & 0xffffffff));
5217         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5218              NIC_SRAM_RX_BUFFER_DESC);
5219
5220         /* Don't even try to program the JUMBO/MINI buffer descriptor
5221          * configs on 5705.
5222          */
5223         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5224                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5225                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5226         } else {
5227                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5228                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5229
5230                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5231                      BDINFO_FLAGS_DISABLED);
5232
5233                 /* Setup replenish threshold. */
5234                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5235
5236                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5237                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5238                              ((u64) tp->rx_jumbo_mapping >> 32));
5239                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5240                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5241                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5242                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5243                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5244                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5245                 } else {
5246                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5247                              BDINFO_FLAGS_DISABLED);
5248                 }
5249
5250         }
5251
5252         /* There is only one send ring on 5705/5750, no need to explicitly
5253          * disable the others.
5254          */
5255         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5256                 /* Clear out send RCB ring in SRAM. */
5257                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5258                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5259                                       BDINFO_FLAGS_DISABLED);
5260         }
5261
5262         tp->tx_prod = 0;
5263         tp->tx_cons = 0;
5264         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5265         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5266
5267         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5268                        tp->tx_desc_mapping,
5269                        (TG3_TX_RING_SIZE <<
5270                         BDINFO_FLAGS_MAXLEN_SHIFT),
5271                        NIC_SRAM_TX_BUFFER_DESC);
5272
5273         /* There is only one receive return ring on 5705/5750, no need
5274          * to explicitly disable the others.
5275          */
5276         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5277                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5278                      i += TG3_BDINFO_SIZE) {
5279                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5280                                       BDINFO_FLAGS_DISABLED);
5281                 }
5282         }
5283
5284         tp->rx_rcb_ptr = 0;
5285         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5286
5287         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5288                        tp->rx_rcb_mapping,
5289                        (TG3_RX_RCB_RING_SIZE(tp) <<
5290                         BDINFO_FLAGS_MAXLEN_SHIFT),
5291                        0);
5292
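             /* tg3_init_rings has already posted rx_pending standard buffers;
              * prime the producer mailbox so the chip knows they are available.
              */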
5293         tp->rx_std_ptr = tp->rx_pending;
5294         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5295                      tp->rx_std_ptr);
5296
5297         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5298                                                 tp->rx_jumbo_pending : 0;
5299         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5300                      tp->rx_jumbo_ptr);
5301
5302         /* Initialize MAC address and backoff seed. */
5303         __tg3_set_mac_addr(tp);
5304
5305         /* MTU + ethernet header + FCS + optional VLAN tag */
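             /* ETH_HLEN (14) + 8: the 8 covers the 4-byte FCS plus a 4-byte 802.1Q tag. */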
5306         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5307
5308         /* The slot time is changed by tg3_setup_phy if we
5309          * run at gigabit with half duplex.
5310          */
5311         tw32(MAC_TX_LENGTHS,
5312              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5313              (6 << TX_LENGTHS_IPG_SHIFT) |
5314              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5315
5316         /* Receive rules. */
5317         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5318         tw32(RCVLPC_CONFIG, 0x0181);
5319
5320         /* Calculate RDMAC_MODE setting early, we need it to determine
5321          * the RCVLPC_STATE_ENABLE mask.
5322          */
5323         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5324                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5325                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5326                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5327                       RDMAC_MODE_LNGREAD_ENAB);
5328         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5329                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5330
5331         /* If statement applies to 5705 and 5750 PCI devices only */
5332         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5333              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5334             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5335                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5336                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5337                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5338                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5339                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5340                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5341                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5342                 }
5343         }
5344
5345         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5346                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5347
5348 #if TG3_TSO_SUPPORT != 0
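             /* Bit 27 is presumably the read DMA engine's hardware-TSO enable;
              * no symbolic name is used for it here.
              */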
5349         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5350                 rdmac_mode |= (1 << 27);
5351 #endif
5352
5353         /* Receive/send statistics. */
5354         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5355             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5356                 val = tr32(RCVLPC_STATS_ENABLE);
5357                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5358                 tw32(RCVLPC_STATS_ENABLE, val);
5359         } else {
5360                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5361         }
5362         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5363         tw32(SNDDATAI_STATSENAB, 0xffffff);
5364         tw32(SNDDATAI_STATSCTRL,
5365              (SNDDATAI_SCTRL_ENABLE |
5366               SNDDATAI_SCTRL_FASTUPD));
5367
5368         /* Setup host coalescing engine. */
5369         tw32(HOSTCC_MODE, 0);
5370         for (i = 0; i < 2000; i++) {
5371                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5372                         break;
5373                 udelay(10);
5374         }
5375
5376         tw32(HOSTCC_RXCOL_TICKS, 0);
5377         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5378         tw32(HOSTCC_RXMAX_FRAMES, 1);
5379         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5380         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5381                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5382                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5383         }
5384         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5385         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5386
5387         /* set status block DMA address */
5388         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5389              ((u64) tp->status_mapping >> 32));
5390         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5391              ((u64) tp->status_mapping & 0xffffffff));
5392
5393         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5394                 /* Status/statistics block address.  See tg3_timer,
5395                  * the tg3_periodic_fetch_stats call there, and
5396                  * tg3_get_stats to see how this works for 5705/5750 chips.
5397                  */
5398                 tw32(HOSTCC_STAT_COAL_TICKS,
5399                      DEFAULT_STAT_COAL_TICKS);
5400                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5401                      ((u64) tp->stats_mapping >> 32));
5402                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5403                      ((u64) tp->stats_mapping & 0xffffffff));
5404                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5405                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5406         }
5407
5408         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5409
5410         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5411         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5412         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5413                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5414
5415         /* Clear statistics/status block in chip, and status block in ram. */
5416         for (i = NIC_SRAM_STATS_BLK;
5417              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5418              i += sizeof(u32)) {
5419                 tg3_write_mem(tp, i, 0);
5420                 udelay(40);
5421         }
5422         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5423
5424         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5425                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5426         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5427         udelay(40);
5428
5429         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5430          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5431          * register to preserve the GPIO settings for LOMs. The GPIOs,
5432          * whether used as inputs or outputs, are set by boot code after
5433          * reset.
5434          */
5435         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5436                 u32 gpio_mask;
5437
5438                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5439                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5440
5441                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5442                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5443                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5444
5445                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5446
5447                 /* GPIO1 must be driven high for eeprom write protect */
5448                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5449                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5450         }
5451         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5452         udelay(100);
5453
5454         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5455         tr32(MAILBOX_INTERRUPT_0);
5456
5457         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5458                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5459                 udelay(40);
5460         }
5461
5462         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5463                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5464                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5465                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5466                WDMAC_MODE_LNGREAD_ENAB);
5467
5468         /* If statement applies to 5705 and 5750 PCI devices only */
5469         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5470              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5471             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5472                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5473                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5474                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5475                         /* nothing */
5476                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5477                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5478                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5479                         val |= WDMAC_MODE_RX_ACCEL;
5480                 }
5481         }
5482
5483         tw32_f(WDMAC_MODE, val);
5484         udelay(40);
5485
5486         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5487                 val = tr32(TG3PCI_X_CAPS);
5488                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5489                         val &= ~PCIX_CAPS_BURST_MASK;
5490                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5491                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5492                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5493                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5494                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5495                                 val |= (tp->split_mode_max_reqs <<
5496                                         PCIX_CAPS_SPLIT_SHIFT);
5497                 }
5498                 tw32(TG3PCI_X_CAPS, val);
5499         }
5500
5501         tw32_f(RDMAC_MODE, rdmac_mode);
5502         udelay(40);
5503
5504         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5505         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5506                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5507         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5508         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5509         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5510         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5511         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5512 #if TG3_TSO_SUPPORT != 0
5513         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5514                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5515 #endif
5516         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5517         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5518
5519         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5520                 err = tg3_load_5701_a0_firmware_fix(tp);
5521                 if (err)
5522                         return err;
5523         }
5524
5525 #if TG3_TSO_SUPPORT != 0
5526         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5527                 err = tg3_load_tso_firmware(tp);
5528                 if (err)
5529                         return err;
5530         }
5531 #endif
5532
5533         tp->tx_mode = TX_MODE_ENABLE;
5534         tw32_f(MAC_TX_MODE, tp->tx_mode);
5535         udelay(100);
5536
5537         tp->rx_mode = RX_MODE_ENABLE;
5538         tw32_f(MAC_RX_MODE, tp->rx_mode);
5539         udelay(10);
5540
5541         if (tp->link_config.phy_is_low_power) {
5542                 tp->link_config.phy_is_low_power = 0;
5543                 tp->link_config.speed = tp->link_config.orig_speed;
5544                 tp->link_config.duplex = tp->link_config.orig_duplex;
5545                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5546         }
5547
5548         tp->mi_mode = MAC_MI_MODE_BASE;
5549         tw32_f(MAC_MI_MODE, tp->mi_mode);
5550         udelay(80);
5551
5552         tw32(MAC_LED_CTRL, tp->led_ctrl);
5553
5554         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5555         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5556                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5557                 udelay(10);
5558         }
5559         tw32_f(MAC_RX_MODE, tp->rx_mode);
5560         udelay(10);
5561
5562         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5563                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5564                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5565                         /* Set drive transmission level to 1.2V  */
5566                         /* only if the signal pre-emphasis bit is not set  */
5567                         val = tr32(MAC_SERDES_CFG);
5568                         val &= 0xfffff000;
5569                         val |= 0x880;
5570                         tw32(MAC_SERDES_CFG, val);
5571                 }
5572                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5573                         tw32(MAC_SERDES_CFG, 0x616000);
5574         }
5575
5576         /* Prevent chip from dropping frames when flow control
5577          * is enabled.
5578          */
5579         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5580
5581         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5582             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5583                 /* Use hardware link auto-negotiation */
5584                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5585         }
5586
5587         err = tg3_setup_phy(tp, 1);
5588         if (err)
5589                 return err;
5590
5591         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5592                 u32 tmp;
5593
5594                 /* Clear CRC stats. */
5595                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5596                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5597                         tg3_readphy(tp, 0x14, &tmp);
5598                 }
5599         }
5600
5601         __tg3_set_rx_mode(tp->dev);
5602
5603         /* Initialize receive rules. */
5604         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5605         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5606         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5607         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5608
5609         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5610                 limit = 8;
5611         else
5612                 limit = 16;
5613         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5614                 limit -= 4;
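             /* Clear the remaining, unused receive rules below.  When ASF is
              * enabled the last four rules are apparently reserved for the
              * firmware, hence the reduced limit above.
              */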
5615         switch (limit) {
5616         case 16:
5617                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5618         case 15:
5619                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5620         case 14:
5621                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5622         case 13:
5623                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5624         case 12:
5625                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5626         case 11:
5627                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5628         case 10:
5629                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5630         case 9:
5631                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5632         case 8:
5633                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5634         case 7:
5635                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5636         case 6:
5637                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5638         case 5:
5639                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5640         case 4:
5641                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5642         case 3:
5643                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5644         case 2:
5645         case 1:
5646
5647         default:
5648                 break;
5649         }
5650
5651         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5652
5653         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5654                 tg3_enable_ints(tp);
5655
5656         return 0;
5657 }
5658
5659 /* Called at device open time to get the chip ready for
5660  * packet processing.  Invoked with tp->lock held.
5661  */
5662 static int tg3_init_hw(struct tg3 *tp)
5663 {
5664         int err;
5665
5666         /* Force the chip into D0. */
5667         err = tg3_set_power_state(tp, 0);
5668         if (err)
5669                 goto out;
5670
5671         tg3_switch_clocks(tp);
5672
5673         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5674
5675         err = tg3_reset_hw(tp);
5676
5677 out:
5678         return err;
5679 }
5680
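     /* Fold a 32-bit hardware counter into a 64-bit software counter.  After
      * the add, (PSTAT)->low < __val exactly when the 32-bit sum wrapped, in
      * which case the carry is propagated into the high word.
      */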
5681 #define TG3_STAT_ADD32(PSTAT, REG) \
5682 do {    u32 __val = tr32(REG); \
5683         (PSTAT)->low += __val; \
5684         if ((PSTAT)->low < __val) \
5685                 (PSTAT)->high += 1; \
5686 } while (0)
5687
5688 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5689 {
5690         struct tg3_hw_stats *sp = tp->hw_stats;
5691
5692         if (!netif_carrier_ok(tp->dev))
5693                 return;
5694
5695         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5696         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5697         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5698         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5699         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5700         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5701         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5702         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5703         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5704         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5705         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5706         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5707         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5708
5709         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5710         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5711         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5712         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5713         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5714         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5715         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5716         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5717         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5718         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5719         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5720         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5721         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5722         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5723 }
5724
5725 static void tg3_timer(unsigned long __opaque)
5726 {
5727         struct tg3 *tp = (struct tg3 *) __opaque;
5728         unsigned long flags;
5729
5730         spin_lock_irqsave(&tp->lock, flags);
5731         spin_lock(&tp->tx_lock);
5732
5733         /* All of this garbage is because, when using non-tagged
5734          * IRQ status, the mailbox/status_block protocol the chip
5735          * uses with the cpu is race prone.
5736          */
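             /* If the status block shows an update, an interrupt may have been
              * missed, so re-assert it via GRC_LCLCTRL_SETINT; otherwise ask
              * the coalescing engine to DMA a fresh status block now.
              */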
5737         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5738                 tw32(GRC_LOCAL_CTRL,
5739                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5740         } else {
5741                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5742                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5743         }
5744
5745         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5746                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5747                 spin_unlock(&tp->tx_lock);
5748                 spin_unlock_irqrestore(&tp->lock, flags);
5749                 schedule_work(&tp->reset_task);
5750                 return;
5751         }
5752
5753         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5754                 tg3_periodic_fetch_stats(tp);
5755
5756         /* This part only runs once per second. */
5757         if (!--tp->timer_counter) {
5758                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5759                         u32 mac_stat;
5760                         int phy_event;
5761
5762                         mac_stat = tr32(MAC_STATUS);
5763
5764                         phy_event = 0;
5765                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5766                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5767                                         phy_event = 1;
5768                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5769                                 phy_event = 1;
5770
5771                         if (phy_event)
5772                                 tg3_setup_phy(tp, 0);
5773                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5774                         u32 mac_stat = tr32(MAC_STATUS);
5775                         int need_setup = 0;
5776
5777                         if (netif_carrier_ok(tp->dev) &&
5778                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5779                                 need_setup = 1;
5780                         }
5781                         if (! netif_carrier_ok(tp->dev) &&
5782                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5783                                          MAC_STATUS_SIGNAL_DET))) {
5784                                 need_setup = 1;
5785                         }
5786                         if (need_setup) {
5787                                 tw32_f(MAC_MODE,
5788                                      (tp->mac_mode &
5789                                       ~MAC_MODE_PORT_MODE_MASK));
5790                                 udelay(40);
5791                                 tw32_f(MAC_MODE, tp->mac_mode);
5792                                 udelay(40);
5793                                 tg3_setup_phy(tp, 0);
5794                         }
5795                 }
5796
5797                 tp->timer_counter = tp->timer_multiplier;
5798         }
5799
5800         /* Heartbeat is only sent once every 120 seconds.  */
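             /* (asf_counter reloads from asf_multiplier = 10 * 120 and the
              *  timer runs at 10 Hz, see tg3_open, giving the 120 second period.)
              */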
5801         if (!--tp->asf_counter) {
5802                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5803                         u32 val;
5804
5805                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5806                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5807                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5808                         val = tr32(GRC_RX_CPU_EVENT);
5809                         val |= (1 << 14);
5810                         tw32(GRC_RX_CPU_EVENT, val);
5811                 }
5812                 tp->asf_counter = tp->asf_multiplier;
5813         }
5814
5815         spin_unlock(&tp->tx_lock);
5816         spin_unlock_irqrestore(&tp->lock, flags);
5817
5818         tp->timer.expires = jiffies + tp->timer_offset;
5819         add_timer(&tp->timer);
5820 }
5821
5822 static int tg3_test_interrupt(struct tg3 *tp)
5823 {
5824         struct net_device *dev = tp->dev;
5825         int err, i;
5826         u32 int_mbox = 0;
5827
5828         tg3_disable_ints(tp);
5829
5830         free_irq(tp->pdev->irq, dev);
5831
5832         err = request_irq(tp->pdev->irq, tg3_test_isr,
5833                           SA_SHIRQ, dev->name, dev);
5834         if (err)
5835                 return err;
5836
5837         tg3_enable_ints(tp);
5838
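             /* Kick the coalescing engine so the chip raises an interrupt right
              * away; the test ISR is expected to leave a non-zero value in the
              * interrupt mailbox, which is polled for below.
              */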
5839         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
5840                HOSTCC_MODE_NOW);
5841
5842         for (i = 0; i < 5; i++) {
5843                 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5844                 if (int_mbox != 0)
5845                         break;
5846                 msleep(10);
5847         }
5848
5849         tg3_disable_ints(tp);
5850
5851         free_irq(tp->pdev->irq, dev);
5852
5853         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5854                 err = request_irq(tp->pdev->irq, tg3_msi,
5855                                   0, dev->name, dev);
5856         else
5857                 err = request_irq(tp->pdev->irq, tg3_interrupt,
5858                                   SA_SHIRQ, dev->name, dev);
5859
5860         if (err)
5861                 return err;
5862
5863         if (int_mbox != 0)
5864                 return 0;
5865
5866         return -EIO;
5867 }
5868
5869 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
5870  * successfully restored.
5871  */
5872 static int tg3_test_msi(struct tg3 *tp)
5873 {
5874         struct net_device *dev = tp->dev;
5875         int err;
5876         u16 pci_cmd;
5877
5878         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
5879                 return 0;
5880
5881         /* Turn off SERR reporting in case MSI terminates with Master
5882          * Abort.
5883          */
5884         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
5885         pci_write_config_word(tp->pdev, PCI_COMMAND,
5886                               pci_cmd & ~PCI_COMMAND_SERR);
5887
5888         err = tg3_test_interrupt(tp);
5889
5890         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
5891
5892         if (!err)
5893                 return 0;
5894
5895         /* other failures */
5896         if (err != -EIO)
5897                 return err;
5898
5899         /* MSI test failed, go back to INTx mode */
5900         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
5901                "switching to INTx mode. Please report this failure to "
5902                "the PCI maintainer and include system chipset information.\n",
5903                        tp->dev->name);
5904
5905         free_irq(tp->pdev->irq, dev);
5906         pci_disable_msi(tp->pdev);
5907
5908         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5909
5910         err = request_irq(tp->pdev->irq, tg3_interrupt,
5911                           SA_SHIRQ, dev->name, dev);
5912
5913         if (err)
5914                 return err;
5915
5916         /* Need to reset the chip because the MSI cycle may have terminated
5917          * with Master Abort.
5918          */
5919         spin_lock_irq(&tp->lock);
5920         spin_lock(&tp->tx_lock);
5921
5922         tg3_halt(tp);
5923         err = tg3_init_hw(tp);
5924
5925         spin_unlock(&tp->tx_lock);
5926         spin_unlock_irq(&tp->lock);
5927
5928         if (err)
5929                 free_irq(tp->pdev->irq, dev);
5930
5931         return err;
5932 }
5933
5934 static int tg3_open(struct net_device *dev)
5935 {
5936         struct tg3 *tp = netdev_priv(dev);
5937         int err;
5938
5939         spin_lock_irq(&tp->lock);
5940         spin_lock(&tp->tx_lock);
5941
5942         tg3_disable_ints(tp);
5943         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5944
5945         spin_unlock(&tp->tx_lock);
5946         spin_unlock_irq(&tp->lock);
5947
5948         /* The placement of this call is tied
5949          * to the setup and use of Host TX descriptors.
5950          */
5951         err = tg3_alloc_consistent(tp);
5952         if (err)
5953                 return err;
5954
5955         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5956             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
5957             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
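                     /* The 5750 AX/BX steppings are excluded above, apparently
                      * because MSI is not reliable on them.
                      */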
5958                 if (pci_enable_msi(tp->pdev) == 0) {
5959                         u32 msi_mode;
5960
5961                         msi_mode = tr32(MSGINT_MODE);
5962                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
5963                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
5964                 }
5965         }
5966         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5967                 err = request_irq(tp->pdev->irq, tg3_msi,
5968                                   0, dev->name, dev);
5969         else
5970                 err = request_irq(tp->pdev->irq, tg3_interrupt,
5971                                   SA_SHIRQ, dev->name, dev);
5972
5973         if (err) {
5974                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5975                         pci_disable_msi(tp->pdev);
5976                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5977                 }
5978                 tg3_free_consistent(tp);
5979                 return err;
5980         }
5981
5982         spin_lock_irq(&tp->lock);
5983         spin_lock(&tp->tx_lock);
5984
5985         err = tg3_init_hw(tp);
5986         if (err) {
5987                 tg3_halt(tp);
5988                 tg3_free_rings(tp);
5989         } else {
5990                 tp->timer_offset = HZ / 10;
5991                 tp->timer_counter = tp->timer_multiplier = 10;
5992                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5993
5994                 init_timer(&tp->timer);
5995                 tp->timer.expires = jiffies + tp->timer_offset;
5996                 tp->timer.data = (unsigned long) tp;
5997                 tp->timer.function = tg3_timer;
5998         }
5999
6000         spin_unlock(&tp->tx_lock);
6001         spin_unlock_irq(&tp->lock);
6002
6003         if (err) {
6004                 free_irq(tp->pdev->irq, dev);
6005                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6006                         pci_disable_msi(tp->pdev);
6007                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6008                 }
6009                 tg3_free_consistent(tp);
6010                 return err;
6011         }
6012
6013         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6014                 err = tg3_test_msi(tp);
6015                 if (err) {
6016                         spin_lock_irq(&tp->lock);
6017                         spin_lock(&tp->tx_lock);
6018
6019                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6020                                 pci_disable_msi(tp->pdev);
6021                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6022                         }
6023                         tg3_halt(tp);
6024                         tg3_free_rings(tp);
6025                         tg3_free_consistent(tp);
6026
6027                         spin_unlock(&tp->tx_lock);
6028                         spin_unlock_irq(&tp->lock);
6029
6030                         return err;
6031                 }
6032         }
6033
6034         spin_lock_irq(&tp->lock);
6035         spin_lock(&tp->tx_lock);
6036
6037         add_timer(&tp->timer);
6038         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6039         tg3_enable_ints(tp);
6040
6041         spin_unlock(&tp->tx_lock);
6042         spin_unlock_irq(&tp->lock);
6043
6044         netif_start_queue(dev);
6045
6046         return 0;
6047 }
6048
6049 #if 0
6050 /*static*/ void tg3_dump_state(struct tg3 *tp)
6051 {
6052         u32 val32, val32_2, val32_3, val32_4, val32_5;
6053         u16 val16;
6054         int i;
6055
6056         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6057         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6058         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6059                val16, val32);
6060
6061         /* MAC block */
6062         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6063                tr32(MAC_MODE), tr32(MAC_STATUS));
6064         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6065                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6066         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6067                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6068         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6069                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6070
6071         /* Send data initiator control block */
6072         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6073                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6074         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6075                tr32(SNDDATAI_STATSCTRL));
6076
6077         /* Send data completion control block */
6078         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6079
6080         /* Send BD ring selector block */
6081         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6082                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6083
6084         /* Send BD initiator control block */
6085         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6086                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6087
6088         /* Send BD completion control block */
6089         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6090
6091         /* Receive list placement control block */
6092         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6093                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6094         printk("       RCVLPC_STATSCTRL[%08x]\n",
6095                tr32(RCVLPC_STATSCTRL));
6096
6097         /* Receive data and receive BD initiator control block */
6098         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6099                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6100
6101         /* Receive data completion control block */
6102         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6103                tr32(RCVDCC_MODE));
6104
6105         /* Receive BD initiator control block */
6106         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6107                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6108
6109         /* Receive BD completion control block */
6110         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6111                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6112
6113         /* Receive list selector control block */
6114         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6115                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6116
6117         /* Mbuf cluster free block */
6118         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6119                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6120
6121         /* Host coalescing control block */
6122         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6123                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6124         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6125                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6126                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6127         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6128                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6129                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6130         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6131                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6132         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6133                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6134
6135         /* Memory arbiter control block */
6136         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6137                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6138
6139         /* Buffer manager control block */
6140         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6141                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6142         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6143                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6144         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6145                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6146                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6147                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6148
6149         /* Read DMA control block */
6150         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6151                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6152
6153         /* Write DMA control block */
6154         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6155                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6156
6157         /* DMA completion block */
6158         printk("DEBUG: DMAC_MODE[%08x]\n",
6159                tr32(DMAC_MODE));
6160
6161         /* GRC block */
6162         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6163                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6164         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6165                tr32(GRC_LOCAL_CTRL));
6166
6167         /* TG3_BDINFOs */
6168         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6169                tr32(RCVDBDI_JUMBO_BD + 0x0),
6170                tr32(RCVDBDI_JUMBO_BD + 0x4),
6171                tr32(RCVDBDI_JUMBO_BD + 0x8),
6172                tr32(RCVDBDI_JUMBO_BD + 0xc));
6173         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6174                tr32(RCVDBDI_STD_BD + 0x0),
6175                tr32(RCVDBDI_STD_BD + 0x4),
6176                tr32(RCVDBDI_STD_BD + 0x8),
6177                tr32(RCVDBDI_STD_BD + 0xc));
6178         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6179                tr32(RCVDBDI_MINI_BD + 0x0),
6180                tr32(RCVDBDI_MINI_BD + 0x4),
6181                tr32(RCVDBDI_MINI_BD + 0x8),
6182                tr32(RCVDBDI_MINI_BD + 0xc));
6183
6184         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6185         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6186         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6187         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6188         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6189                val32, val32_2, val32_3, val32_4);
6190
6191         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6192         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6193         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6194         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6195         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6196                val32, val32_2, val32_3, val32_4);
6197
6198         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6199         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6200         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6201         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6202         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6203         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6204                val32, val32_2, val32_3, val32_4, val32_5);
6205
6206         /* SW status block */
6207         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6208                tp->hw_status->status,
6209                tp->hw_status->status_tag,
6210                tp->hw_status->rx_jumbo_consumer,
6211                tp->hw_status->rx_consumer,
6212                tp->hw_status->rx_mini_consumer,
6213                tp->hw_status->idx[0].rx_producer,
6214                tp->hw_status->idx[0].tx_consumer);
6215
6216         /* SW statistics block */
6217         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6218                ((u32 *)tp->hw_stats)[0],
6219                ((u32 *)tp->hw_stats)[1],
6220                ((u32 *)tp->hw_stats)[2],
6221                ((u32 *)tp->hw_stats)[3]);
6222
6223         /* Mailboxes */
6224         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6225                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6226                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6227                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6228                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6229
6230         /* NIC side send descriptors. */
6231         for (i = 0; i < 6; i++) {
6232                 unsigned long txd;
6233
6234                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6235                         + (i * sizeof(struct tg3_tx_buffer_desc));
6236                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6237                        i,
6238                        readl(txd + 0x0), readl(txd + 0x4),
6239                        readl(txd + 0x8), readl(txd + 0xc));
6240         }
6241
6242         /* NIC side RX descriptors. */
6243         for (i = 0; i < 6; i++) {
6244                 unsigned long rxd;
6245
6246                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6247                         + (i * sizeof(struct tg3_rx_buffer_desc));
6248                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6249                        i,
6250                        readl(rxd + 0x0), readl(rxd + 0x4),
6251                        readl(rxd + 0x8), readl(rxd + 0xc));
6252                 rxd += (4 * sizeof(u32));
6253                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6254                        i,
6255                        readl(rxd + 0x0), readl(rxd + 0x4),
6256                        readl(rxd + 0x8), readl(rxd + 0xc));
6257         }
6258
6259         for (i = 0; i < 6; i++) {
6260                 unsigned long rxd;
6261
6262                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6263                         + (i * sizeof(struct tg3_rx_buffer_desc));
6264                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6265                        i,
6266                        readl(rxd + 0x0), readl(rxd + 0x4),
6267                        readl(rxd + 0x8), readl(rxd + 0xc));
6268                 rxd += (4 * sizeof(u32));
6269                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6270                        i,
6271                        readl(rxd + 0x0), readl(rxd + 0x4),
6272                        readl(rxd + 0x8), readl(rxd + 0xc));
6273         }
6274 }
6275 #endif
6276
6277 static struct net_device_stats *tg3_get_stats(struct net_device *);
6278 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6279
6280 static int tg3_close(struct net_device *dev)
6281 {
6282         struct tg3 *tp = netdev_priv(dev);
6283
6284         netif_stop_queue(dev);
6285
6286         del_timer_sync(&tp->timer);
6287
6288         spin_lock_irq(&tp->lock);
6289         spin_lock(&tp->tx_lock);
6290 #if 0
6291         tg3_dump_state(tp);
6292 #endif
6293
6294         tg3_disable_ints(tp);
6295
6296         tg3_halt(tp);
6297         tg3_free_rings(tp);
6298         tp->tg3_flags &=
6299                 ~(TG3_FLAG_INIT_COMPLETE |
6300                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6301         netif_carrier_off(tp->dev);
6302
6303         spin_unlock(&tp->tx_lock);
6304         spin_unlock_irq(&tp->lock);
6305
6306         free_irq(tp->pdev->irq, dev);
6307         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6308                 pci_disable_msi(tp->pdev);
6309                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6310         }
6311
6312         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6313                sizeof(tp->net_stats_prev));
6314         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6315                sizeof(tp->estats_prev));
6316
6317         tg3_free_consistent(tp);
6318
6319         return 0;
6320 }
6321
6322 static inline unsigned long get_stat64(tg3_stat64_t *val)
6323 {
6324         unsigned long ret;
6325
6326 #if (BITS_PER_LONG == 32)
6327         ret = val->low;
6328 #else
6329         ret = ((u64)val->high << 32) | ((u64)val->low);
6330 #endif
6331         return ret;
6332 }
6333
6334 static unsigned long calc_crc_errors(struct tg3 *tp)
6335 {
6336         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6337
6338         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6339             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6340              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6341                 unsigned long flags;
6342                 u32 val;
6343
6344                 spin_lock_irqsave(&tp->lock, flags);
6345                 if (!tg3_readphy(tp, 0x1e, &val)) {
6346                         tg3_writephy(tp, 0x1e, val | 0x8000);
6347                         tg3_readphy(tp, 0x14, &val);
6348                 } else
6349                         val = 0;
6350                 spin_unlock_irqrestore(&tp->lock, flags);
6351
6352                 tp->phy_crc_errors += val;
6353
6354                 return tp->phy_crc_errors;
6355         }
6356
6357         return get_stat64(&hw_stats->rx_fcs_errors);
6358 }
6359
6360 #define ESTAT_ADD(member) \
6361         estats->member =        old_estats->member + \
6362                                 get_stat64(&hw_stats->member)
6363
6364 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6365 {
6366         struct tg3_ethtool_stats *estats = &tp->estats;
6367         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6368         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6369
6370         if (!hw_stats)
6371                 return old_estats;
6372
6373         ESTAT_ADD(rx_octets);
6374         ESTAT_ADD(rx_fragments);
6375         ESTAT_ADD(rx_ucast_packets);
6376         ESTAT_ADD(rx_mcast_packets);
6377         ESTAT_ADD(rx_bcast_packets);
6378         ESTAT_ADD(rx_fcs_errors);
6379         ESTAT_ADD(rx_align_errors);
6380         ESTAT_ADD(rx_xon_pause_rcvd);
6381         ESTAT_ADD(rx_xoff_pause_rcvd);
6382         ESTAT_ADD(rx_mac_ctrl_rcvd);
6383         ESTAT_ADD(rx_xoff_entered);
6384         ESTAT_ADD(rx_frame_too_long_errors);
6385         ESTAT_ADD(rx_jabbers);
6386         ESTAT_ADD(rx_undersize_packets);
6387         ESTAT_ADD(rx_in_length_errors);
6388         ESTAT_ADD(rx_out_length_errors);
6389         ESTAT_ADD(rx_64_or_less_octet_packets);
6390         ESTAT_ADD(rx_65_to_127_octet_packets);
6391         ESTAT_ADD(rx_128_to_255_octet_packets);
6392         ESTAT_ADD(rx_256_to_511_octet_packets);
6393         ESTAT_ADD(rx_512_to_1023_octet_packets);
6394         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6395         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6396         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6397         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6398         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6399
6400         ESTAT_ADD(tx_octets);
6401         ESTAT_ADD(tx_collisions);
6402         ESTAT_ADD(tx_xon_sent);
6403         ESTAT_ADD(tx_xoff_sent);
6404         ESTAT_ADD(tx_flow_control);
6405         ESTAT_ADD(tx_mac_errors);
6406         ESTAT_ADD(tx_single_collisions);
6407         ESTAT_ADD(tx_mult_collisions);
6408         ESTAT_ADD(tx_deferred);
6409         ESTAT_ADD(tx_excessive_collisions);
6410         ESTAT_ADD(tx_late_collisions);
6411         ESTAT_ADD(tx_collide_2times);
6412         ESTAT_ADD(tx_collide_3times);
6413         ESTAT_ADD(tx_collide_4times);
6414         ESTAT_ADD(tx_collide_5times);
6415         ESTAT_ADD(tx_collide_6times);
6416         ESTAT_ADD(tx_collide_7times);
6417         ESTAT_ADD(tx_collide_8times);
6418         ESTAT_ADD(tx_collide_9times);
6419         ESTAT_ADD(tx_collide_10times);
6420         ESTAT_ADD(tx_collide_11times);
6421         ESTAT_ADD(tx_collide_12times);
6422         ESTAT_ADD(tx_collide_13times);
6423         ESTAT_ADD(tx_collide_14times);
6424         ESTAT_ADD(tx_collide_15times);
6425         ESTAT_ADD(tx_ucast_packets);
6426         ESTAT_ADD(tx_mcast_packets);
6427         ESTAT_ADD(tx_bcast_packets);
6428         ESTAT_ADD(tx_carrier_sense_errors);
6429         ESTAT_ADD(tx_discards);
6430         ESTAT_ADD(tx_errors);
6431
6432         ESTAT_ADD(dma_writeq_full);
6433         ESTAT_ADD(dma_write_prioq_full);
6434         ESTAT_ADD(rxbds_empty);
6435         ESTAT_ADD(rx_discards);
6436         ESTAT_ADD(rx_errors);
6437         ESTAT_ADD(rx_threshold_hit);
6438
6439         ESTAT_ADD(dma_readq_full);
6440         ESTAT_ADD(dma_read_prioq_full);
6441         ESTAT_ADD(tx_comp_queue_full);
6442
6443         ESTAT_ADD(ring_set_send_prod_index);
6444         ESTAT_ADD(ring_status_update);
6445         ESTAT_ADD(nic_irqs);
6446         ESTAT_ADD(nic_avoided_irqs);
6447         ESTAT_ADD(nic_tx_threshold_hit);
6448
6449         return estats;
6450 }
6451
6452 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6453 {
6454         struct tg3 *tp = netdev_priv(dev);
6455         struct net_device_stats *stats = &tp->net_stats;
6456         struct net_device_stats *old_stats = &tp->net_stats_prev;
6457         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6458
6459         if (!hw_stats)
6460                 return old_stats;
6461
6462         stats->rx_packets = old_stats->rx_packets +
6463                 get_stat64(&hw_stats->rx_ucast_packets) +
6464                 get_stat64(&hw_stats->rx_mcast_packets) +
6465                 get_stat64(&hw_stats->rx_bcast_packets);
6466                 
6467         stats->tx_packets = old_stats->tx_packets +
6468                 get_stat64(&hw_stats->tx_ucast_packets) +
6469                 get_stat64(&hw_stats->tx_mcast_packets) +
6470                 get_stat64(&hw_stats->tx_bcast_packets);
6471
6472         stats->rx_bytes = old_stats->rx_bytes +
6473                 get_stat64(&hw_stats->rx_octets);
6474         stats->tx_bytes = old_stats->tx_bytes +
6475                 get_stat64(&hw_stats->tx_octets);
6476
6477         stats->rx_errors = old_stats->rx_errors +
6478                 get_stat64(&hw_stats->rx_errors) +
6479                 get_stat64(&hw_stats->rx_discards);
6480         stats->tx_errors = old_stats->tx_errors +
6481                 get_stat64(&hw_stats->tx_errors) +
6482                 get_stat64(&hw_stats->tx_mac_errors) +
6483                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6484                 get_stat64(&hw_stats->tx_discards);
6485
6486         stats->multicast = old_stats->multicast +
6487                 get_stat64(&hw_stats->rx_mcast_packets);
6488         stats->collisions = old_stats->collisions +
6489                 get_stat64(&hw_stats->tx_collisions);
6490
6491         stats->rx_length_errors = old_stats->rx_length_errors +
6492                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6493                 get_stat64(&hw_stats->rx_undersize_packets);
6494
6495         stats->rx_over_errors = old_stats->rx_over_errors +
6496                 get_stat64(&hw_stats->rxbds_empty);
6497         stats->rx_frame_errors = old_stats->rx_frame_errors +
6498                 get_stat64(&hw_stats->rx_align_errors);
6499         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6500                 get_stat64(&hw_stats->tx_discards);
6501         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6502                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6503
6504         stats->rx_crc_errors = old_stats->rx_crc_errors +
6505                 calc_crc_errors(tp);
6506
6507         return stats;
6508 }
6509
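     /* Standard bit-reflected CRC-32 (polynomial 0xedb88320, initial value
      * 0xffffffff, final inversion), computed bit-serially over the buffer.
      * Used below to derive the multicast hash filter bit for an address.
      */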
6510 static inline u32 calc_crc(unsigned char *buf, int len)
6511 {
6512         u32 reg;
6513         u32 tmp;
6514         int j, k;
6515
6516         reg = 0xffffffff;
6517
6518         for (j = 0; j < len; j++) {
6519                 reg ^= buf[j];
6520
6521                 for (k = 0; k < 8; k++) {
6522                         tmp = reg & 0x01;
6523
6524                         reg >>= 1;
6525
6526                         if (tmp) {
6527                                 reg ^= 0xedb88320;
6528                         }
6529                 }
6530         }
6531
6532         return ~reg;
6533 }
6534
6535 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6536 {
6537         /* accept or reject all multicast frames */
6538         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6539         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6540         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6541         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6542 }
6543
6544 static void __tg3_set_rx_mode(struct net_device *dev)
6545 {
6546         struct tg3 *tp = netdev_priv(dev);
6547         u32 rx_mode;
6548
6549         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6550                                   RX_MODE_KEEP_VLAN_TAG);
6551
6552         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6553          * flag clear.
6554          */
6555 #if TG3_VLAN_TAG_USED
6556         if (!tp->vlgrp &&
6557             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6558                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6559 #else
6560         /* By definition, VLAN is always disabled in this
6561          * case.
6562          */
6563         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6564                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6565 #endif
6566
6567         if (dev->flags & IFF_PROMISC) {
6568                 /* Promiscuous mode. */
6569                 rx_mode |= RX_MODE_PROMISC;
6570         } else if (dev->flags & IFF_ALLMULTI) {
6571                 /* Accept all multicast. */
6572                 tg3_set_multi (tp, 1);
6573         } else if (dev->mc_count < 1) {
6574                 /* Reject all multicast. */
6575                 tg3_set_multi (tp, 0);
6576         } else {
6577                 /* Accept one or more multicast(s). */
6578                 struct dev_mc_list *mclist;
6579                 unsigned int i;
6580                 u32 mc_filter[4] = { 0, };
6581                 u32 regidx;
6582                 u32 bit;
6583                 u32 crc;
6584
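                     /* The low 7 bits of the inverted CRC pick one of 128
                      * hash-filter bits spread across the four 32-bit
                      * MAC_HASH registers: bits 6:5 select the register,
                      * bits 4:0 the bit within it.  For example, a value
                      * of 0x45 sets bit 5 of mc_filter[2] (MAC_HASH_REG_2).
                      */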
6585                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6586                      i++, mclist = mclist->next) {
6587
6588                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6589                         bit = ~crc & 0x7f;
6590                         regidx = (bit & 0x60) >> 5;
6591                         bit &= 0x1f;
6592                         mc_filter[regidx] |= (1 << bit);
6593                 }
6594
6595                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6596                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6597                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6598                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6599         }
6600
6601         if (rx_mode != tp->rx_mode) {
6602                 tp->rx_mode = rx_mode;
6603                 tw32_f(MAC_RX_MODE, rx_mode);
6604                 udelay(10);
6605         }
6606 }
6607
6608 static void tg3_set_rx_mode(struct net_device *dev)
6609 {
6610         struct tg3 *tp = netdev_priv(dev);
6611
6612         spin_lock_irq(&tp->lock);
6613         spin_lock(&tp->tx_lock);
6614         __tg3_set_rx_mode(dev);
6615         spin_unlock(&tp->tx_lock);
6616         spin_unlock_irq(&tp->lock);
6617 }
6618
6619 #define TG3_REGDUMP_LEN         (32 * 1024)
6620
6621 static int tg3_get_regs_len(struct net_device *dev)
6622 {
6623         return TG3_REGDUMP_LEN;
6624 }
6625
6626 static void tg3_get_regs(struct net_device *dev,
6627                 struct ethtool_regs *regs, void *_p)
6628 {
6629         u32 *p = _p;
6630         struct tg3 *tp = netdev_priv(dev);
6631         u8 *orig_p = _p;
6632         int i;
6633
6634         regs->version = 0;
6635
6636         memset(p, 0, TG3_REGDUMP_LEN);
6637
6638         spin_lock_irq(&tp->lock);
6639         spin_lock(&tp->tx_lock);
6640
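     /* Each register block is copied into the dump at its hardware offset
      * (p is repositioned to orig_p + base for every block), so ranges that
      * are never read stay zero from the memset() above.
      */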
6641 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6642 #define GET_REG32_LOOP(base,len)                \
6643 do {    p = (u32 *)(orig_p + (base));           \
6644         for (i = 0; i < len; i += 4)            \
6645                 __GET_REG32((base) + i);        \
6646 } while (0)
6647 #define GET_REG32_1(reg)                        \
6648 do {    p = (u32 *)(orig_p + (reg));            \
6649         __GET_REG32((reg));                     \
6650 } while (0)
6651
6652         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6653         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6654         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6655         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6656         GET_REG32_1(SNDDATAC_MODE);
6657         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6658         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6659         GET_REG32_1(SNDBDC_MODE);
6660         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6661         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6662         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6663         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6664         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6665         GET_REG32_1(RCVDCC_MODE);
6666         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6667         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6668         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6669         GET_REG32_1(MBFREE_MODE);
6670         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6671         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6672         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6673         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6674         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6675         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6676         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6677         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6678         GET_REG32_LOOP(FTQ_RESET, 0x120);
6679         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6680         GET_REG32_1(DMAC_MODE);
6681         GET_REG32_LOOP(GRC_MODE, 0x4c);
6682         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6683                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6684
6685 #undef __GET_REG32
6686 #undef GET_REG32_LOOP
6687 #undef GET_REG32_1
6688
6689         spin_unlock(&tp->tx_lock);
6690         spin_unlock_irq(&tp->lock);
6691 }
6692
6693 static int tg3_get_eeprom_len(struct net_device *dev)
6694 {
6695         struct tg3 *tp = netdev_priv(dev);
6696
6697         return tp->nvram_size;
6698 }
6699
6700 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6701
6702 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6703 {
6704         struct tg3 *tp = netdev_priv(dev);
6705         int ret;
6706         u8  *pd;
6707         u32 i, offset, len, val, b_offset, b_count;
6708
6709         offset = eeprom->offset;
6710         len = eeprom->len;
6711         eeprom->len = 0;
6712
6713         eeprom->magic = TG3_EEPROM_MAGIC;
6714
6715         if (offset & 3) {
6716                 /* adjustments to start on required 4 byte boundary */
6717                 b_offset = offset & 3;
6718                 b_count = 4 - b_offset;
6719                 if (b_count > len) {
6720                         /* i.e. offset=1 len=2 */
6721                         b_count = len;
6722                 }
6723                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6724                 if (ret)
6725                         return ret;
6726                 val = cpu_to_le32(val);
6727                 memcpy(data, ((char*)&val) + b_offset, b_count);
6728                 len -= b_count;
6729                 offset += b_count;
6730                 eeprom->len += b_count;
6731         }
6732
6733         /* read bytes up to the last 4 byte boundary */
6734         pd = &data[eeprom->len];
6735         for (i = 0; i < (len - (len & 3)); i += 4) {
6736                 ret = tg3_nvram_read(tp, offset + i, &val);
6737                 if (ret) {
6738                         eeprom->len += i;
6739                         return ret;
6740                 }
6741                 val = cpu_to_le32(val);
6742                 memcpy(pd + i, &val, 4);
6743         }
6744         eeprom->len += i;
6745
6746         if (len & 3) {
6747                 /* read last bytes not ending on 4 byte boundary */
6748                 pd = &data[eeprom->len];
6749                 b_count = len & 3;
6750                 b_offset = offset + len - b_count;
6751                 ret = tg3_nvram_read(tp, b_offset, &val);
6752                 if (ret)
6753                         return ret;
6754                 val = cpu_to_le32(val);
6755                 memcpy(pd, ((char*)&val), b_count);
6756                 eeprom->len += b_count;
6757         }
6758         return 0;
6759 }
6760
6761 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6762
6763 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6764 {
6765         struct tg3 *tp = netdev_priv(dev);
6766         int ret;
6767         u32 offset, len, b_offset, odd_len, start, end;
6768         u8 *buf;
6769
6770         if (eeprom->magic != TG3_EEPROM_MAGIC)
6771                 return -EINVAL;
6772
6773         offset = eeprom->offset;
6774         len = eeprom->len;
6775
6776         if ((b_offset = (offset & 3))) {
6777                 /* adjustments to start on required 4 byte boundary */
6778                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6779                 if (ret)
6780                         return ret;
6781                 start = cpu_to_le32(start);
6782                 len += b_offset;
6783                 offset &= ~3;
6784                 if (len < 4)
6785                         len = 4;
6786         }
6787
6788         odd_len = 0;
6789         if (len & 3) {
6790                 /* adjustments to end on required 4 byte boundary */
6791                 odd_len = 1;
6792                 len = (len + 3) & ~3;
6793                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6794                 if (ret)
6795                         return ret;
6796                 end = cpu_to_le32(end);
6797         }
6798
6799         buf = data;
6800         if (b_offset || odd_len) {
6801                 buf = kmalloc(len, GFP_KERNEL);
6802                 if (!buf)
6803                         return -ENOMEM;
6804                 if (b_offset)
6805                         memcpy(buf, &start, 4);
6806                 if (odd_len)
6807                         memcpy(buf+len-4, &end, 4);
6808                 memcpy(buf + b_offset, data, eeprom->len);
6809         }
6810
6811         ret = tg3_nvram_write_block(tp, offset, len, buf);
6812
6813         if (buf != data)
6814                 kfree(buf);
6815
6816         return ret;
6817 }
6818
6819 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6820 {
6821         struct tg3 *tp = netdev_priv(dev);
6822   
6823         cmd->supported = (SUPPORTED_Autoneg);
6824
6825         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6826                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6827                                    SUPPORTED_1000baseT_Full);
6828
6829         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6830                 cmd->supported |= (SUPPORTED_100baseT_Half |
6831                                   SUPPORTED_100baseT_Full |
6832                                   SUPPORTED_10baseT_Half |
6833                                   SUPPORTED_10baseT_Full |
6834                                   SUPPORTED_MII);
6835         else
6836                 cmd->supported |= SUPPORTED_FIBRE;
6837   
6838         cmd->advertising = tp->link_config.advertising;
6839         if (netif_running(dev)) {
6840                 cmd->speed = tp->link_config.active_speed;
6841                 cmd->duplex = tp->link_config.active_duplex;
6842         }
6843         cmd->port = 0;
6844         cmd->phy_address = PHY_ADDR;
6845         cmd->transceiver = 0;
6846         cmd->autoneg = tp->link_config.autoneg;
6847         cmd->maxtxpkt = 0;
6848         cmd->maxrxpkt = 0;
6849         return 0;
6850 }
6851   
6852 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6853 {
6854         struct tg3 *tp = netdev_priv(dev);
6855   
6856         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6857                 /* These are the only advertisement bits allowed.  */
6858                 if (cmd->autoneg == AUTONEG_ENABLE &&
6859                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6860                                           ADVERTISED_1000baseT_Full |
6861                                           ADVERTISED_Autoneg |
6862                                           ADVERTISED_FIBRE)))
6863                         return -EINVAL;
6864         }
6865
6866         spin_lock_irq(&tp->lock);
6867         spin_lock(&tp->tx_lock);
6868
6869         tp->link_config.autoneg = cmd->autoneg;
6870         if (cmd->autoneg == AUTONEG_ENABLE) {
6871                 tp->link_config.advertising = cmd->advertising;
6872                 tp->link_config.speed = SPEED_INVALID;
6873                 tp->link_config.duplex = DUPLEX_INVALID;
6874         } else {
6875                 tp->link_config.advertising = 0;
6876                 tp->link_config.speed = cmd->speed;
6877                 tp->link_config.duplex = cmd->duplex;
6878         }
6879   
6880         if (netif_running(dev))
6881                 tg3_setup_phy(tp, 1);
6882
6883         spin_unlock(&tp->tx_lock);
6884         spin_unlock_irq(&tp->lock);
6885   
6886         return 0;
6887 }
6888   
6889 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6890 {
6891         struct tg3 *tp = netdev_priv(dev);
6892   
6893         strcpy(info->driver, DRV_MODULE_NAME);
6894         strcpy(info->version, DRV_MODULE_VERSION);
6895         strcpy(info->bus_info, pci_name(tp->pdev));
6896 }
6897   
6898 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6899 {
6900         struct tg3 *tp = netdev_priv(dev);
6901   
6902         wol->supported = WAKE_MAGIC;
6903         wol->wolopts = 0;
6904         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6905                 wol->wolopts = WAKE_MAGIC;
6906         memset(&wol->sopass, 0, sizeof(wol->sopass));
6907 }
6908   
6909 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6910 {
6911         struct tg3 *tp = netdev_priv(dev);
6912   
6913         if (wol->wolopts & ~WAKE_MAGIC)
6914                 return -EINVAL;
6915         if ((wol->wolopts & WAKE_MAGIC) &&
6916             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6917             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6918                 return -EINVAL;
6919   
6920         spin_lock_irq(&tp->lock);
6921         if (wol->wolopts & WAKE_MAGIC)
6922                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6923         else
6924                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6925         spin_unlock_irq(&tp->lock);
6926   
6927         return 0;
6928 }
6929   
6930 static u32 tg3_get_msglevel(struct net_device *dev)
6931 {
6932         struct tg3 *tp = netdev_priv(dev);
6933         return tp->msg_enable;
6934 }
6935   
6936 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6937 {
6938         struct tg3 *tp = netdev_priv(dev);
6939         tp->msg_enable = value;
6940 }
6941   
6942 #if TG3_TSO_SUPPORT != 0
6943 static int tg3_set_tso(struct net_device *dev, u32 value)
6944 {
6945         struct tg3 *tp = netdev_priv(dev);
6946
6947         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6948                 if (value)
6949                         return -EINVAL;
6950                 return 0;
6951         }
6952         return ethtool_op_set_tso(dev, value);
6953 }
6954 #endif
6955   
6956 static int tg3_nway_reset(struct net_device *dev)
6957 {
6958         struct tg3 *tp = netdev_priv(dev);
6959         u32 bmcr;
6960         int r;
6961   
6962         if (!netif_running(dev))
6963                 return -EAGAIN;
6964
6965         spin_lock_irq(&tp->lock);
6966         r = -EINVAL;
6967         tg3_readphy(tp, MII_BMCR, &bmcr);
6968         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6969             (bmcr & BMCR_ANENABLE)) {
6970                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6971                 r = 0;
6972         }
6973         spin_unlock_irq(&tp->lock);
6974   
6975         return r;
6976 }
6977   
6978 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6979 {
6980         struct tg3 *tp = netdev_priv(dev);
6981   
6982         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6983         ering->rx_mini_max_pending = 0;
6984         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6985
6986         ering->rx_pending = tp->rx_pending;
6987         ering->rx_mini_pending = 0;
6988         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6989         ering->tx_pending = tp->tx_pending;
6990 }
6991   
6992 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6993 {
6994         struct tg3 *tp = netdev_priv(dev);
6995   
6996         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6997             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6998             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6999                 return -EINVAL;
7000   
7001         if (netif_running(dev))
7002                 tg3_netif_stop(tp);
7003
7004         spin_lock_irq(&tp->lock);
7005         spin_lock(&tp->tx_lock);
7006   
7007         tp->rx_pending = ering->rx_pending;
7008
7009         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7010             tp->rx_pending > 63)
7011                 tp->rx_pending = 63;
7012         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7013         tp->tx_pending = ering->tx_pending;
7014
7015         if (netif_running(dev)) {
7016                 tg3_halt(tp);
7017                 tg3_init_hw(tp);
7018                 tg3_netif_start(tp);
7019         }
7020
7021         spin_unlock(&tp->tx_lock);
7022         spin_unlock_irq(&tp->lock);
7023   
7024         return 0;
7025 }
7026   
7027 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7028 {
7029         struct tg3 *tp = netdev_priv(dev);
7030   
7031         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7032         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7033         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7034 }
7035   
7036 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7037 {
7038         struct tg3 *tp = netdev_priv(dev);
7039   
7040         if (netif_running(dev))
7041                 tg3_netif_stop(tp);
7042
7043         spin_lock_irq(&tp->lock);
7044         spin_lock(&tp->tx_lock);
7045         if (epause->autoneg)
7046                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7047         else
7048                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7049         if (epause->rx_pause)
7050                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7051         else
7052                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7053         if (epause->tx_pause)
7054                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7055         else
7056                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7057
7058         if (netif_running(dev)) {
7059                 tg3_halt(tp);
7060                 tg3_init_hw(tp);
7061                 tg3_netif_start(tp);
7062         }
7063         spin_unlock(&tp->tx_lock);
7064         spin_unlock_irq(&tp->lock);
7065   
7066         return 0;
7067 }
7068   
7069 static u32 tg3_get_rx_csum(struct net_device *dev)
7070 {
7071         struct tg3 *tp = netdev_priv(dev);
7072         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7073 }
7074   
7075 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7076 {
7077         struct tg3 *tp = netdev_priv(dev);
7078   
7079         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7080                 if (data != 0)
7081                         return -EINVAL;
7082                 return 0;
7083         }
7084   
7085         spin_lock_irq(&tp->lock);
7086         if (data)
7087                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7088         else
7089                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7090         spin_unlock_irq(&tp->lock);
7091   
7092         return 0;
7093 }
7094   
7095 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7096 {
7097         struct tg3 *tp = netdev_priv(dev);
7098   
7099         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7100                 if (data != 0)
7101                         return -EINVAL;
7102                 return 0;
7103         }
7104   
7105         if (data)
7106                 dev->features |= NETIF_F_IP_CSUM;
7107         else
7108                 dev->features &= ~NETIF_F_IP_CSUM;
7109
7110         return 0;
7111 }
7112
7113 static int tg3_get_stats_count (struct net_device *dev)
7114 {
7115         return TG3_NUM_STATS;
7116 }
7117
7118 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7119 {
7120         switch (stringset) {
7121         case ETH_SS_STATS:
7122                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7123                 break;
7124         default:
7125                 WARN_ON(1);     /* we need a WARN() */
7126                 break;
7127         }
7128 }
7129
7130 static void tg3_get_ethtool_stats (struct net_device *dev,
7131                                    struct ethtool_stats *estats, u64 *tmp_stats)
7132 {
7133         struct tg3 *tp = netdev_priv(dev);
7134         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7135 }
7136
7137 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7138 {
7139         struct mii_ioctl_data *data = if_mii(ifr);
7140         struct tg3 *tp = netdev_priv(dev);
7141         int err;
7142
7143         switch(cmd) {
7144         case SIOCGMIIPHY:
7145                 data->phy_id = PHY_ADDR;
7146
7147                 /* fallthru */
7148         case SIOCGMIIREG: {
7149                 u32 mii_regval;
7150
7151                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7152                         break;                  /* We have no PHY */
7153
7154                 spin_lock_irq(&tp->lock);
7155                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7156                 spin_unlock_irq(&tp->lock);
7157
7158                 data->val_out = mii_regval;
7159
7160                 return err;
7161         }
7162
7163         case SIOCSMIIREG:
7164                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7165                         break;                  /* We have no PHY */
7166
7167                 if (!capable(CAP_NET_ADMIN))
7168                         return -EPERM;
7169
7170                 spin_lock_irq(&tp->lock);
7171                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7172                 spin_unlock_irq(&tp->lock);
7173
7174                 return err;
7175
7176         default:
7177                 /* do nothing */
7178                 break;
7179         }
7180         return -EOPNOTSUPP;
7181 }
7182
7183 #if TG3_VLAN_TAG_USED
7184 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7185 {
7186         struct tg3 *tp = netdev_priv(dev);
7187
7188         spin_lock_irq(&tp->lock);
7189         spin_lock(&tp->tx_lock);
7190
7191         tp->vlgrp = grp;
7192
7193         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7194         __tg3_set_rx_mode(dev);
7195
7196         spin_unlock(&tp->tx_lock);
7197         spin_unlock_irq(&tp->lock);
7198 }
7199
7200 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7201 {
7202         struct tg3 *tp = netdev_priv(dev);
7203
7204         spin_lock_irq(&tp->lock);
7205         spin_lock(&tp->tx_lock);
7206         if (tp->vlgrp)
7207                 tp->vlgrp->vlan_devices[vid] = NULL;
7208         spin_unlock(&tp->tx_lock);
7209         spin_unlock_irq(&tp->lock);
7210 }
7211 #endif
7212
7213 static struct ethtool_ops tg3_ethtool_ops = {
7214         .get_settings           = tg3_get_settings,
7215         .set_settings           = tg3_set_settings,
7216         .get_drvinfo            = tg3_get_drvinfo,
7217         .get_regs_len           = tg3_get_regs_len,
7218         .get_regs               = tg3_get_regs,
7219         .get_wol                = tg3_get_wol,
7220         .set_wol                = tg3_set_wol,
7221         .get_msglevel           = tg3_get_msglevel,
7222         .set_msglevel           = tg3_set_msglevel,
7223         .nway_reset             = tg3_nway_reset,
7224         .get_link               = ethtool_op_get_link,
7225         .get_eeprom_len         = tg3_get_eeprom_len,
7226         .get_eeprom             = tg3_get_eeprom,
7227         .set_eeprom             = tg3_set_eeprom,
7228         .get_ringparam          = tg3_get_ringparam,
7229         .set_ringparam          = tg3_set_ringparam,
7230         .get_pauseparam         = tg3_get_pauseparam,
7231         .set_pauseparam         = tg3_set_pauseparam,
7232         .get_rx_csum            = tg3_get_rx_csum,
7233         .set_rx_csum            = tg3_set_rx_csum,
7234         .get_tx_csum            = ethtool_op_get_tx_csum,
7235         .set_tx_csum            = tg3_set_tx_csum,
7236         .get_sg                 = ethtool_op_get_sg,
7237         .set_sg                 = ethtool_op_set_sg,
7238 #if TG3_TSO_SUPPORT != 0
7239         .get_tso                = ethtool_op_get_tso,
7240         .set_tso                = tg3_set_tso,
7241 #endif
7242         .get_strings            = tg3_get_strings,
7243         .get_stats_count        = tg3_get_stats_count,
7244         .get_ethtool_stats      = tg3_get_ethtool_stats,
7245 };
7246
7247 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7248 {
7249         u32 cursize, val;
7250
7251         tp->nvram_size = EEPROM_CHIP_SIZE;
7252
7253         if (tg3_nvram_read(tp, 0, &val) != 0)
7254                 return;
7255
7256         if (swab32(val) != TG3_EEPROM_MAGIC)
7257                 return;
7258
7259         /*
7260          * Size the chip by reading offsets at increasing powers of two.
7261          * When we encounter our validation signature, we know the addressing
7262          * has wrapped around, and thus have our chip size.
7263          */
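             /* e.g. probe offsets 0x800, 0x1000, 0x2000, ... until the
              * magic signature is read back again.
              */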
7264         cursize = 0x800;
7265
7266         while (cursize < tp->nvram_size) {
7267                 if (tg3_nvram_read(tp, cursize, &val) != 0)
7268                         return;
7269
7270                 if (swab32(val) == TG3_EEPROM_MAGIC)
7271                         break;
7272
7273                 cursize <<= 1;
7274         }
7275
7276         tp->nvram_size = cursize;
7277 }
7278                 
7279 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7280 {
7281         u32 val;
7282
7283         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7284                 if (val != 0) {
7285                         tp->nvram_size = (val >> 16) * 1024;
7286                         return;
7287                 }
7288         }
7289         tp->nvram_size = 0x20000;
7290 }
7291
7292 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7293 {
7294         u32 nvcfg1;
7295
7296         nvcfg1 = tr32(NVRAM_CFG1);
7297         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7298                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7299         }
7300         else {
7301                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7302                 tw32(NVRAM_CFG1, nvcfg1);
7303         }
7304
7305         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7306                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7307                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7308                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7309                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7310                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7311                                 break;
7312                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7313                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7314                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7315                                 break;
7316                         case FLASH_VENDOR_ATMEL_EEPROM:
7317                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7318                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7319                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7320                                 break;
7321                         case FLASH_VENDOR_ST:
7322                                 tp->nvram_jedecnum = JEDEC_ST;
7323                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7324                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7325                                 break;
7326                         case FLASH_VENDOR_SAIFUN:
7327                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7328                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7329                                 break;
7330                         case FLASH_VENDOR_SST_SMALL:
7331                         case FLASH_VENDOR_SST_LARGE:
7332                                 tp->nvram_jedecnum = JEDEC_SST;
7333                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7334                                 break;
7335                 }
7336         }
7337         else {
7338                 tp->nvram_jedecnum = JEDEC_ATMEL;
7339                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7340                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7341         }
7342 }
7343
7344 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
7345 {
7346         u32 nvcfg1;
7347
7348         nvcfg1 = tr32(NVRAM_CFG1);
7349
7350         /* NVRAM protection for TPM */
7351         if (nvcfg1 & (1 << 27))
7352                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
7353
7354         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
7355                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
7356                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
7357                         tp->nvram_jedecnum = JEDEC_ATMEL;
7358                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7359                         break;
7360                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
7361                         tp->nvram_jedecnum = JEDEC_ATMEL;
7362                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7363                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7364                         break;
7365                 case FLASH_5752VENDOR_ST_M45PE10:
7366                 case FLASH_5752VENDOR_ST_M45PE20:
7367                 case FLASH_5752VENDOR_ST_M45PE40:
7368                         tp->nvram_jedecnum = JEDEC_ST;
7369                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7370                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7371                         break;
7372         }
7373
7374         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
7375                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
7376                         case FLASH_5752PAGE_SIZE_256:
7377                                 tp->nvram_pagesize = 256;
7378                                 break;
7379                         case FLASH_5752PAGE_SIZE_512:
7380                                 tp->nvram_pagesize = 512;
7381                                 break;
7382                         case FLASH_5752PAGE_SIZE_1K:
7383                                 tp->nvram_pagesize = 1024;
7384                                 break;
7385                         case FLASH_5752PAGE_SIZE_2K:
7386                                 tp->nvram_pagesize = 2048;
7387                                 break;
7388                         case FLASH_5752PAGE_SIZE_4K:
7389                                 tp->nvram_pagesize = 4096;
7390                                 break;
7391                         case FLASH_5752PAGE_SIZE_264:
7392                                 tp->nvram_pagesize = 264;
7393                                 break;
7394                 }
7395         }
7396         else {
7397                 /* For eeprom, set pagesize to maximum eeprom size */
7398                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7399
7400                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7401                 tw32(NVRAM_CFG1, nvcfg1);
7402         }
7403 }
7404
7405 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7406 static void __devinit tg3_nvram_init(struct tg3 *tp)
7407 {
7408         int j;
7409
7410         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7411                 return;
7412
7413         tw32_f(GRC_EEPROM_ADDR,
7414              (EEPROM_ADDR_FSM_RESET |
7415               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7416                EEPROM_ADDR_CLKPERD_SHIFT)));
7417
7418         /* XXX schedule_timeout() ... */
7419         for (j = 0; j < 100; j++)
7420                 udelay(10);
7421
7422         /* Enable seeprom accesses. */
7423         tw32_f(GRC_LOCAL_CTRL,
7424              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7425         udelay(100);
7426
7427         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7428             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7429                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7430
7431                 tg3_enable_nvram_access(tp);
7432
7433                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7434                         tg3_get_5752_nvram_info(tp);
7435                 else
7436                         tg3_get_nvram_info(tp);
7437
7438                 tg3_get_nvram_size(tp);
7439
7440                 tg3_disable_nvram_access(tp);
7441
7442         } else {
7443                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7444
7445                 tg3_get_eeprom_size(tp);
7446         }
7447 }
7448
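     /* Fallback read path for chips without the NVRAM interface (5700/5701):
      * start a read through the GRC serial-EEPROM state machine and poll
      * GRC_EEPROM_ADDR for the COMPLETE bit.
      */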
7449 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7450                                         u32 offset, u32 *val)
7451 {
7452         u32 tmp;
7453         int i;
7454
7455         if (offset > EEPROM_ADDR_ADDR_MASK ||
7456             (offset % 4) != 0)
7457                 return -EINVAL;
7458
7459         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7460                                         EEPROM_ADDR_DEVID_MASK |
7461                                         EEPROM_ADDR_READ);
7462         tw32(GRC_EEPROM_ADDR,
7463              tmp |
7464              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7465              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7466               EEPROM_ADDR_ADDR_MASK) |
7467              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7468
7469         for (i = 0; i < 10000; i++) {
7470                 tmp = tr32(GRC_EEPROM_ADDR);
7471
7472                 if (tmp & EEPROM_ADDR_COMPLETE)
7473                         break;
7474                 udelay(100);
7475         }
7476         if (!(tmp & EEPROM_ADDR_COMPLETE))
7477                 return -EBUSY;
7478
7479         *val = tr32(GRC_EEPROM_DATA);
7480         return 0;
7481 }
7482
7483 #define NVRAM_CMD_TIMEOUT 10000
7484
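     /* Issue an NVRAM command and poll for NVRAM_CMD_DONE, 10 usec per
      * iteration (roughly 100 msec worst case given the timeout above).
      * Returns -EBUSY if the controller never signals completion.
      */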
7485 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7486 {
7487         int i;
7488
7489         tw32(NVRAM_CMD, nvram_cmd);
7490         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7491                 udelay(10);
7492                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7493                         udelay(10);
7494                         break;
7495                 }
7496         }
7497         if (i == NVRAM_CMD_TIMEOUT) {
7498                 return -EBUSY;
7499         }
7500         return 0;
7501 }
7502
7503 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7504 {
7505         int ret;
7506
7507         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7508                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7509                 return -EINVAL;
7510         }
7511
7512         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7513                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7514
7515         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7516                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7517                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7518
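                     /* AT45DB-style Atmel flash is addressed as a page
                      * number plus an offset within the page rather than
                      * linearly, so remap the flat offset here.  Assuming
                      * 264-byte pages and a PAGE_POS of 9, linear offset
                      * 1000 would map to page 3, byte 208, i.e.
                      * (3 << 9) + 208 = 0x6d0.
                      */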
7519                 offset = ((offset / tp->nvram_pagesize) <<
7520                           ATMEL_AT45DB0X1B_PAGE_POS) +
7521                         (offset % tp->nvram_pagesize);
7522         }
7523
7524         if (offset > NVRAM_ADDR_MSK)
7525                 return -EINVAL;
7526
7527         tg3_nvram_lock(tp);
7528
7529         tg3_enable_nvram_access(tp);
7530
7531         tw32(NVRAM_ADDR, offset);
7532         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7533                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7534
7535         if (ret == 0)
7536                 *val = swab32(tr32(NVRAM_RDDATA));
7537
7538         tg3_nvram_unlock(tp);
7539
7540         tg3_disable_nvram_access(tp);
7541
7542         return ret;
7543 }
7544
7545 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7546                                     u32 offset, u32 len, u8 *buf)
7547 {
7548         int i, j, rc = 0;
7549         u32 val;
7550
7551         for (i = 0; i < len; i += 4) {
7552                 u32 addr, data;
7553
7554                 addr = offset + i;
7555
7556                 memcpy(&data, buf + i, 4);
7557
7558                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7559
7560                 val = tr32(GRC_EEPROM_ADDR);
7561                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7562
7563                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7564                         EEPROM_ADDR_READ);
7565                 tw32(GRC_EEPROM_ADDR, val |
7566                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7567                         (addr & EEPROM_ADDR_ADDR_MASK) |
7568                         EEPROM_ADDR_START |
7569                         EEPROM_ADDR_WRITE);
7570                 
7571                 for (j = 0; j < 10000; j++) {
7572                         val = tr32(GRC_EEPROM_ADDR);
7573
7574                         if (val & EEPROM_ADDR_COMPLETE)
7575                                 break;
7576                         udelay(100);
7577                 }
7578                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7579                         rc = -EBUSY;
7580                         break;
7581                 }
7582         }
7583
7584         return rc;
7585 }
7586
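     /* Unbuffered flash cannot be modified in place: each affected page is
      * read back into a scratch buffer, merged with the new data, erased,
      * and then reprogrammed one 32-bit word at a time.
      */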
7587 /* offset and length are dword aligned */
7588 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7589                 u8 *buf)
7590 {
7591         int ret = 0;
7592         u32 pagesize = tp->nvram_pagesize;
7593         u32 pagemask = pagesize - 1;
7594         u32 nvram_cmd;
7595         u8 *tmp;
7596
7597         tmp = kmalloc(pagesize, GFP_KERNEL);
7598         if (tmp == NULL)
7599                 return -ENOMEM;
7600
7601         while (len) {
7602                 int j;
7603                 u32 phy_addr, page_off, size;
7604
7605                 phy_addr = offset & ~pagemask;
7606         
7607                 for (j = 0; j < pagesize; j += 4) {
7608                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7609                                                 (u32 *) (tmp + j))))
7610                                 break;
7611                 }
7612                 if (ret)
7613                         break;
7614
7615                 page_off = offset & pagemask;
7616                 size = pagesize;
7617                 if (len < size)
7618                         size = len;
7619
7620                 len -= size;
7621
7622                 memcpy(tmp + page_off, buf, size);
7623
7624                 offset = offset + (pagesize - page_off);
7625
7626                 tg3_enable_nvram_access(tp);
7627
7628                 /*
7629                  * Before we can erase the flash page, we need
7630                  * to issue a special "write enable" command.
7631                  */
7632                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7633
7634                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7635                         break;
7636
7637                 /* Erase the target page */
7638                 tw32(NVRAM_ADDR, phy_addr);
7639
7640                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7641                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7642
7643                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7644                         break;
7645
7646                 /* Issue another write enable to start the write. */
7647                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7648
7649                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7650                         break;
7651
7652                 for (j = 0; j < pagesize; j += 4) {
7653                         u32 data;
7654
7655                         data = *((u32 *) (tmp + j));
7656                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7657
7658                         tw32(NVRAM_ADDR, phy_addr + j);
7659
7660                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7661                                 NVRAM_CMD_WR;
7662
7663                         if (j == 0)
7664                                 nvram_cmd |= NVRAM_CMD_FIRST;
7665                         else if (j == (pagesize - 4))
7666                                 nvram_cmd |= NVRAM_CMD_LAST;
7667
7668                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7669                                 break;
7670                 }
7671                 if (ret)
7672                         break;
7673         }
7674
7675         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7676         tg3_nvram_exec_cmd(tp, nvram_cmd);
7677
7678         kfree(tmp);
7679
7680         return ret;
7681 }
7682
7683 /* offset and length are dword aligned */
7684 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7685                 u8 *buf)
7686 {
7687         int i, ret = 0;
7688
7689         for (i = 0; i < len; i += 4, offset += 4) {
7690                 u32 data, page_off, phy_addr, nvram_cmd;
7691
7692                 memcpy(&data, buf + i, 4);
7693                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7694
7695                 page_off = offset % tp->nvram_pagesize;
7696
7697                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7698                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7699
7700                         phy_addr = ((offset / tp->nvram_pagesize) <<
7701                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7702                 }
7703                 else {
7704                         phy_addr = offset;
7705                 }
7706
7707                 tw32(NVRAM_ADDR, phy_addr);
7708
7709                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7710
7711                 if ((page_off == 0) || (i == 0))
7712                         nvram_cmd |= NVRAM_CMD_FIRST;
7713                 else if (page_off == (tp->nvram_pagesize - 4))
7714                         nvram_cmd |= NVRAM_CMD_LAST;
7715
7716                 if (i == (len - 4))
7717                         nvram_cmd |= NVRAM_CMD_LAST;
7718
7719                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7720                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7721
7722                         if ((ret = tg3_nvram_exec_cmd(tp,
7723                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7724                                 NVRAM_CMD_DONE)))
7725
7726                                 break;
7727                 }
7728                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7729                         /* We always do complete word writes to eeprom. */
7730                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7731                 }
7732
7733                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7734                         break;
7735         }
7736         return ret;
7737 }
7738
7739 /* offset and length are dword aligned */
7740 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7741 {
7742         int ret;
7743
7744         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7745                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7746                 return -EINVAL;
7747         }
7748
7749         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7750                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
7751                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
7752                 udelay(40);
7753         }
7754
7755         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7756                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7757         }
7758         else {
7759                 u32 grc_mode;
7760
7761                 tg3_nvram_lock(tp);
7762
7763                 tg3_enable_nvram_access(tp);
7764                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
7765                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
7766                         tw32(NVRAM_WRITE1, 0x406);
7767
7768                 grc_mode = tr32(GRC_MODE);
7769                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7770
7771                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7772                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7773
7774                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7775                                 buf);
7776                 }
7777                 else {
7778                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7779                                 buf);
7780                 }
7781
7782                 grc_mode = tr32(GRC_MODE);
7783                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7784
7785                 tg3_disable_nvram_access(tp);
7786                 tg3_nvram_unlock(tp);
7787         }
7788
7789         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7790                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7791                 udelay(40);
7792         }
7793
7794         return ret;
7795 }
7796
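     /* Maps PCI subsystem vendor/device IDs to the PHY fitted on known
      * boards; a phy_id of 0 appears to mark fiber/SerDes boards with no
      * copper PHY.
      */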
7797 struct subsys_tbl_ent {
7798         u16 subsys_vendor, subsys_devid;
7799         u32 phy_id;
7800 };
7801
7802 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7803         /* Broadcom boards. */
7804         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7805         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7806         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7807         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7808         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7809         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7810         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7811         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7812         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7813         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7814         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7815
7816         /* 3com boards. */
7817         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7818         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7819         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7820         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7821         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7822
7823         /* DELL boards. */
7824         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7825         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7826         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7827         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7828
7829         /* Compaq boards. */
7830         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7831         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7832         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7833         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7834         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7835
7836         /* IBM boards. */
7837         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7838 };
7839
7840 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7841 {
7842         int i;
7843
7844         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7845                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7846                      tp->pdev->subsystem_vendor) &&
7847                     (subsys_id_to_phy_id[i].subsys_devid ==
7848                      tp->pdev->subsystem_device))
7849                         return &subsys_id_to_phy_id[i];
7850         }
7851         return NULL;
7852 }
7853
7854 /* Since this function may be called in D3-hot power state during
7855  * tg3_init_one(), only config cycles are allowed.
7856  */
7857 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
7858 {
7859         u32 val;
7860
7861         /* Make sure register accesses (indirect or otherwise)
7862          * will function correctly.
7863          */
7864         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7865                                tp->misc_host_ctrl);
7866
7867         tp->phy_id = PHY_ID_INVALID;
7868         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7869
7870         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
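        /* Only trust the configuration stored in NIC SRAM when the
         * firmware/bootcode has left its magic signature there; otherwise
         * the conservative defaults set just above are kept.
         */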
7871         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7872                 u32 nic_cfg, led_cfg;
7873                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
7874                 int eeprom_phy_serdes = 0;
7875
7876                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7877                 tp->nic_sram_data_cfg = nic_cfg;
7878
7879                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7880                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7881                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7882                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7883                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7884                     (ver > 0) && (ver < 0x100))
7885                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7886
7887                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7888                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7889                         eeprom_phy_serdes = 1;
7890
7891                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7892                 if (nic_phy_id != 0) {
7893                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7894                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7895
7896                         eeprom_phy_id  = (id1 >> 16) << 10;
7897                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7898                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7899                 } else
7900                         eeprom_phy_id = 0;
7901
7902                 tp->phy_id = eeprom_phy_id;
7903                 if (eeprom_phy_serdes)
7904                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7905
7906                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7907                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7908                                     SHASTA_EXT_LED_MODE_MASK);
7909                 else
7910                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7911
7912                 switch (led_cfg) {
7913                 default:
7914                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7915                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7916                         break;
7917
7918                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7919                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7920                         break;
7921
7922                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7923                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7924                         break;
7925
7926                 case SHASTA_EXT_LED_SHARED:
7927                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7928                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7929                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7930                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7931                                                  LED_CTRL_MODE_PHY_2);
7932                         break;
7933
7934                 case SHASTA_EXT_LED_MAC:
7935                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7936                         break;
7937
7938                 case SHASTA_EXT_LED_COMBO:
7939                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7940                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7941                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7942                                                  LED_CTRL_MODE_PHY_2);
7943                         break;
7944
7945                 }
7946
7947                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7948                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7949                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7950                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7951
7952                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7953                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7954                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7955                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7956
7957                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7958                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7959                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7960                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7961                 }
7962                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7963                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7964
7965                 if (cfg2 & (1 << 17))
7966                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7967
7968                 /* serdes signal pre-emphasis in register 0x590 is set by
7969                  * the bootcode if bit 18 is set. */
7970                 if (cfg2 & (1 << 18))
7971                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7972         }
7973 }
7974
7975 static int __devinit tg3_phy_probe(struct tg3 *tp)
7976 {
7977         u32 hw_phy_id_1, hw_phy_id_2;
7978         u32 hw_phy_id, hw_phy_id_masked;
7979         int err;
7980
7981         /* Reading the PHY ID register can conflict with ASF
7982          * firmware access to the PHY hardware.
7983          */
7984         err = 0;
7985         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7986                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7987         } else {
7988                 /* Now read the physical PHY_ID from the chip and verify
7989                  * that it is sane.  If it doesn't look good, we fall back
7990                  * to the PHY_ID found in the eeprom area or, failing
7991                  * that, the hard-coded subsystem ID table.
7992                  */
7993                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7994                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7995
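                /* Repack the raw MII ID registers into the driver's internal
                 * PHY_ID layout: PHYSID1 lands in bits 25:10, the OUI bits
                 * from PHYSID2 (bits 15:10) move to bits 31:26, and the
                 * model/revision bits (9:0) stay in bits 9:0, matching the
                 * PHY_ID_BCM* constants compared against below.
                 */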
7996                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7997                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7998                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7999
8000                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8001         }
8002
8003         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8004                 tp->phy_id = hw_phy_id;
8005                 if (hw_phy_id_masked == PHY_ID_BCM8002)
8006                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8007         } else {
8008                 if (tp->phy_id != PHY_ID_INVALID) {
8009                         /* Do nothing, phy ID already set up in
8010                          * tg3_get_eeprom_hw_cfg().
8011                          */
8012                 } else {
8013                         struct subsys_tbl_ent *p;
8014
8015                         /* No eeprom signature?  Try the hardcoded
8016                          * subsys device table.
8017                          */
8018                         p = lookup_by_subsys(tp);
8019                         if (!p)
8020                                 return -ENODEV;
8021
8022                         tp->phy_id = p->phy_id;
8023                         if (!tp->phy_id ||
8024                             tp->phy_id == PHY_ID_BCM8002)
8025                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8026                 }
8027         }
8028
8029         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8030             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8031                 u32 bmsr, adv_reg, tg3_ctrl;
8032
8033                 tg3_readphy(tp, MII_BMSR, &bmsr);
8034                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8035                     (bmsr & BMSR_LSTATUS))
8036                         goto skip_phy_reset;
8037                     
8038                 err = tg3_phy_reset(tp);
8039                 if (err)
8040                         return err;
8041
8042                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
8043                            ADVERTISE_100HALF | ADVERTISE_100FULL |
8044                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
8045                 tg3_ctrl = 0;
8046                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
8047                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
8048                                     MII_TG3_CTRL_ADV_1000_FULL);
8049                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8050                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
8051                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
8052                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
8053                 }
8054
8055                 if (!tg3_copper_is_advertising_all(tp)) {
8056                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8057
8058                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8059                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8060
8061                         tg3_writephy(tp, MII_BMCR,
8062                                      BMCR_ANENABLE | BMCR_ANRESTART);
8063                 }
8064                 tg3_phy_set_wirespeed(tp);
8065
8066                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8067                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8068                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8069         }
8070
8071 skip_phy_reset:
8072         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8073                 err = tg3_init_5401phy_dsp(tp);
8074                 if (err)
8075                         return err;
8076         }
8077
8078         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
8079                 err = tg3_init_5401phy_dsp(tp);
8080         }
8081
8082         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8083                 tp->link_config.advertising =
8084                         (ADVERTISED_1000baseT_Half |
8085                          ADVERTISED_1000baseT_Full |
8086                          ADVERTISED_Autoneg |
8087                          ADVERTISED_FIBRE);
8088         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8089                 tp->link_config.advertising &=
8090                         ~(ADVERTISED_1000baseT_Half |
8091                           ADVERTISED_1000baseT_Full);
8092
8093         return err;
8094 }
8095
8096 static void __devinit tg3_read_partno(struct tg3 *tp)
8097 {
8098         unsigned char vpd_data[256];
8099         int i;
8100
8101         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8102                 /* Sun decided not to put the necessary bits in the
8103                  * NVRAM of their onboard tg3 parts :(
8104                  */
8105                 strcpy(tp->board_part_number, "Sun 570X");
8106                 return;
8107         }
8108
8109         for (i = 0; i < 256; i += 4) {
8110                 u32 tmp;
8111
8112                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
8113                         goto out_not_found;
8114
8115                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
8116                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
8117                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
8118                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
8119         }
8120
8121         /* Now parse and find the part number. */
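        /* The loop below walks standard PCI VPD resource tags: 0x82
         * (identifier string) and 0x91 (read-write data) are skipped,
         * while 0x90 (read-only data) is searched for the two-character
         * "PN" keyword.  Each tag is followed by a 16-bit little-endian
         * length, hence the "i + 3 + ..." arithmetic.
         */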
8122         for (i = 0; i < 256; ) {
8123                 unsigned char val = vpd_data[i];
8124                 int block_end;
8125
8126                 if (val == 0x82 || val == 0x91) {
8127                         i = (i + 3 +
8128                              (vpd_data[i + 1] +
8129                               (vpd_data[i + 2] << 8)));
8130                         continue;
8131                 }
8132
8133                 if (val != 0x90)
8134                         goto out_not_found;
8135
8136                 block_end = (i + 3 +
8137                              (vpd_data[i + 1] +
8138                               (vpd_data[i + 2] << 8)));
8139                 i += 3;
8140                 while (i < block_end) {
8141                         if (vpd_data[i + 0] == 'P' &&
8142                             vpd_data[i + 1] == 'N') {
8143                                 int partno_len = vpd_data[i + 2];
8144
8145                                 if (partno_len > 24)
8146                                         goto out_not_found;
8147
8148                                 memcpy(tp->board_part_number,
8149                                        &vpd_data[i + 3],
8150                                        partno_len);
8151
8152                                 /* Success. */
8153                                 return;
8154                         }
                        /* Not "PN": skip over this keyword field (2-byte
                         * keyword, 1 length byte, then the data) and keep
                         * scanning.
                         */
                        i += 3 + vpd_data[i + 2];
8155                 }
8156
8157                 /* Part number not found. */
8158                 goto out_not_found;
8159         }
8160
8161 out_not_found:
8162         strcpy(tp->board_part_number, "none");
8163 }
8164
8165 #ifdef CONFIG_SPARC64
8166 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
8167 {
8168         struct pci_dev *pdev = tp->pdev;
8169         struct pcidev_cookie *pcp = pdev->sysdata;
8170
8171         if (pcp != NULL) {
8172                 int node = pcp->prom_node;
8173                 u32 venid;
8174                 int err;
8175
8176                 err = prom_getproperty(node, "subsystem-vendor-id",
8177                                        (char *) &venid, sizeof(venid));
8178                 if (err == 0 || err == -1)
8179                         return 0;
8180                 if (venid == PCI_VENDOR_ID_SUN)
8181                         return 1;
8182         }
8183         return 0;
8184 }
8185 #endif
8186
8187 static int __devinit tg3_get_invariants(struct tg3 *tp)
8188 {
8189         static struct pci_device_id write_reorder_chipsets[] = {
8190                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8191                              PCI_DEVICE_ID_INTEL_82801AA_8) },
8192                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8193                              PCI_DEVICE_ID_INTEL_82801AB_8) },
8194                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8195                              PCI_DEVICE_ID_INTEL_82801BA_11) },
8196                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8197                              PCI_DEVICE_ID_INTEL_82801BA_6) },
8198                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
8199                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
8200                 { },
8201         };
8202         u32 misc_ctrl_reg;
8203         u32 cacheline_sz_reg;
8204         u32 pci_state_reg, grc_misc_cfg;
8205         u32 val;
8206         u16 pci_cmd;
8207         int err;
8208
8209 #ifdef CONFIG_SPARC64
8210         if (tg3_is_sun_570X(tp))
8211                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
8212 #endif
8213
8214         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
8215          * reordering to the mailbox registers done by the host
8216          * controller can cause major troubles.  We read back from
8217          * every mailbox register write to force the writes to be
8218          * posted to the chip in order.
8219          */
8220         if (pci_dev_present(write_reorder_chipsets))
8221                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
8222
8223         /* Force memory write invalidate off.  If we leave it on,
8224          * then on 5700_BX chips we have to enable a workaround.
8225          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
8226          * to match the cacheline size.  The Broadcom driver has this
8227          * workaround but turns MWI off all the time, so it never uses
8228          * it.  This seems to suggest that the workaround is insufficient.
8229          */
8230         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8231         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
8232         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8233
8234         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
8235          * has the register indirect write enable bit set before
8236          * we try to access any of the MMIO registers.  It is also
8237          * critical that the PCI-X hw workaround situation is decided
8238          * before that as well.
8239          */
8240         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8241                               &misc_ctrl_reg);
8242
8243         tp->pci_chip_rev_id = (misc_ctrl_reg >>
8244                                MISC_HOST_CTRL_CHIPREV_SHIFT);
8245
8246         /* Wrong chip ID in 5752 A0. This code can be removed later
8247          * as A0 is not in production.
8248          */
8249         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
8250                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
8251
8252         /* Initialize misc host control in PCI block. */
8253         tp->misc_host_ctrl |= (misc_ctrl_reg &
8254                                MISC_HOST_CTRL_CHIPREV);
8255         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8256                                tp->misc_host_ctrl);
8257
8258         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8259                               &cacheline_sz_reg);
8260
8261         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
8262         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
8263         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
8264         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
8265
8266         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8267             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8268                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
8269
8270         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
8271             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
8272                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
8273
8274         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8275                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
8276
8277         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
8278                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
8279
8280         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8281             tp->pci_lat_timer < 64) {
8282                 tp->pci_lat_timer = 64;
8283
8284                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
8285                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
8286                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
8287                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
8288
8289                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8290                                        cacheline_sz_reg);
8291         }
8292
8293         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8294                               &pci_state_reg);
8295
8296         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
8297                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
8298
8299                 /* If this is a 5700 BX chipset, and we are in PCI-X
8300                  * mode, enable register write workaround.
8301                  *
8302                  * The workaround is to use indirect register accesses
8303                  * for all chip writes not to mailbox registers.
8304                  */
8305                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
8306                         u32 pm_reg;
8307                         u16 pci_cmd;
8308
8309                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8310
8311                         /* The chip can have its power management PCI config
8312                          * space registers clobbered due to this bug.
8313                          * So explicitly force the chip into D0 here.
8314                          */
8315                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8316                                               &pm_reg);
8317                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
8318                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
8319                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8320                                                pm_reg);
8321
8322                         /* Also, force SERR#/PERR# in PCI command. */
8323                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8324                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
8325                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8326                 }
8327         }
8328
8329         /* Back to back register writes can cause problems on this chip,
8330          * the workaround is to read back all reg writes except those to
8331          * mailbox regs.  See tg3_write_indirect_reg32().
8332          *
8333          * PCI Express 5750_A0 rev chips need this workaround too.
8334          */
8335         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8336             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8337              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8338                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8339
8340         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8341                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8342         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8343                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8344
8345         /* Chip-specific fixup from Broadcom driver */
8346         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8347             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8348                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8349                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8350         }
8351
8352         /* Get eeprom hw config before calling tg3_set_power_state().
8353          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
8354          * determined before calling tg3_set_power_state() so that
8355          * we know whether or not to switch out of Vaux power.
8356          * When the flag is set, it means that GPIO1 is used for eeprom
8357          * write protect and also implies that it is a LOM where GPIOs
8358          * are not used to switch power.
8359          */ 
8360         tg3_get_eeprom_hw_cfg(tp);
8361
8362         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
8363          * GPIO1 driven high will bring 5700's external PHY out of reset.
8364          * It is also used as eeprom write protect on LOMs.
8365          */
8366         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
8367         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8368             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
8369                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8370                                        GRC_LCLCTRL_GPIO_OUTPUT1);
8371         /* Unused GPIO3 must be driven as output on 5752 because there
8372          * are no pull-up resistors on unused GPIO pins.
8373          */
8374         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8375                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
8376
8377         /* Force the chip into D0. */
8378         err = tg3_set_power_state(tp, 0);
8379         if (err) {
8380                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8381                        pci_name(tp->pdev));
8382                 return err;
8383         }
8384
8385         /* 5700 B0 chips do not support checksumming correctly due
8386          * to hardware bugs.
8387          */
8388         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8389                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8390
8391         /* Pseudo-header checksum is done by hardware logic and not
8392          * the offload processors, so make the chip do the pseudo-
8393          * header checksums on receive.  For transmit it is more
8394          * convenient to do the pseudo-header checksum in software
8395          * as Linux does that on transmit for us in all cases.
8396          */
8397         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8398         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8399
8400         /* Derive initial jumbo mode from MTU assigned in
8401          * ether_setup() via the alloc_etherdev() call
8402          */
8403         if (tp->dev->mtu > ETH_DATA_LEN)
8404                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8405
8406         /* Determine WakeOnLan speed to use. */
8407         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8408             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8409             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8410             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8411                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8412         } else {
8413                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8414         }
8415
8416         /* A few boards don't want Ethernet@WireSpeed phy feature */
8417         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8418             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8419              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8420              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8421                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8422
8423         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8424             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8425                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8426         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8427                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8428
8429         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8430                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8431
8432         /* Only 5701 and later support tagged irq status mode.
8433          * Also, 5788 chips cannot use tagged irq status.
8434          *
8435          * However, since we are using NAPI, avoid tagged irq status
8436          * because the interrupt condition is more difficult to
8437          * fully clear in that mode.
8438          */
8439         tp->coalesce_mode = 0;
8440
8441         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8442             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8443                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8444
8445         /* Initialize MAC MI mode, polling disabled. */
8446         tw32_f(MAC_MI_MODE, tp->mi_mode);
8447         udelay(80);
8448
8449         /* Initialize data/descriptor byte/word swapping. */
8450         val = tr32(GRC_MODE);
8451         val &= GRC_MODE_HOST_STACKUP;
8452         tw32(GRC_MODE, val | tp->grc_mode);
8453
8454         tg3_switch_clocks(tp);
8455
8456         /* Clear this out for sanity. */
8457         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8458
8459         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8460                               &pci_state_reg);
8461         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8462             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8463                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8464
8465                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8466                     chiprevid == CHIPREV_ID_5701_B0 ||
8467                     chiprevid == CHIPREV_ID_5701_B2 ||
8468                     chiprevid == CHIPREV_ID_5701_B5) {
8469                         void __iomem *sram_base;
8470
8471                         /* Write some dummy words into the SRAM status block
8472                          * area, see if it reads back correctly.  If the return
8473                          * value is bad, force enable the PCIX workaround.
8474                          */
8475                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8476
8477                         writel(0x00000000, sram_base);
8478                         writel(0x00000000, sram_base + 4);
8479                         writel(0xffffffff, sram_base + 4);
8480                         if (readl(sram_base) != 0x00000000)
8481                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8482                 }
8483         }
8484
8485         udelay(50);
8486         tg3_nvram_init(tp);
8487
8488         grc_misc_cfg = tr32(GRC_MISC_CFG);
8489         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8490
8491         /* Broadcom's driver says that CIOBE multisplit has a bug */
8492 #if 0
8493         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8494             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8495                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8496                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8497         }
8498 #endif
8499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8500             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8501              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8502                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8503
8504         /* these are limited to 10/100 only */
8505         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8506              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8507             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8508              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8509              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8510               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8511               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8512             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8513              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8514               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8515                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8516
8517         err = tg3_phy_probe(tp);
8518         if (err) {
8519                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8520                        pci_name(tp->pdev), err);
8521                 /* ... but do not return immediately ... */
8522         }
8523
8524         tg3_read_partno(tp);
8525
8526         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8527                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8528         } else {
8529                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8530                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8531                 else
8532                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8533         }
8534
8535         /* 5700 {AX,BX} chips have a broken status block link
8536          * change bit implementation, so we must use the
8537          * status register in those cases.
8538          */
8539         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8540                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8541         else
8542                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8543
8544         /* The led_ctrl is set during tg3_phy_probe; here we might
8545          * have to force the link status polling mechanism based
8546          * upon subsystem IDs.
8547          */
8548         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8549             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8550                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8551                                   TG3_FLAG_USE_LINKCHG_REG);
8552         }
8553
8554         /* For all SERDES we poll the MAC status register. */
8555         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8556                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8557         else
8558                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8559
8560         /* 5700 BX chips need to have their TX producer index mailboxes
8561          * written twice to work around a bug.
8562          */
8563         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8564                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8565         else
8566                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8567
8568         /* It seems all chips can get confused if TX buffers
8569          * straddle the 4GB address boundary in some cases.
8570          */
8571         tp->dev->hard_start_xmit = tg3_start_xmit;
8572
8573         tp->rx_offset = 2;
8574         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8575             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8576                 tp->rx_offset = 0;
8577
8578         /* By default, disable wake-on-lan.  User can change this
8579          * using ETHTOOL_SWOL.
8580          */
8581         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8582
8583         return err;
8584 }
8585
8586 #ifdef CONFIG_SPARC64
8587 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8588 {
8589         struct net_device *dev = tp->dev;
8590         struct pci_dev *pdev = tp->pdev;
8591         struct pcidev_cookie *pcp = pdev->sysdata;
8592
8593         if (pcp != NULL) {
8594                 int node = pcp->prom_node;
8595
8596                 if (prom_getproplen(node, "local-mac-address") == 6) {
8597                         prom_getproperty(node, "local-mac-address",
8598                                          dev->dev_addr, 6);
8599                         return 0;
8600                 }
8601         }
8602         return -ENODEV;
8603 }
8604
8605 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8606 {
8607         struct net_device *dev = tp->dev;
8608
8609         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8610         return 0;
8611 }
8612 #endif
8613
8614 static int __devinit tg3_get_device_address(struct tg3 *tp)
8615 {
8616         struct net_device *dev = tp->dev;
8617         u32 hi, lo, mac_offset;
8618
8619 #ifdef CONFIG_SPARC64
8620         if (!tg3_get_macaddr_sparc(tp))
8621                 return 0;
8622 #endif
8623
8624         mac_offset = 0x7c;
8625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8626             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8627                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8628                         mac_offset = 0xcc;
8629                 if (tg3_nvram_lock(tp))
8630                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8631                 else
8632                         tg3_nvram_unlock(tp);
8633         }
8634
8635         /* First try to get it from MAC address mailbox. */
8636         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
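        /* 0x484b is ASCII "HK"; it appears to be the signature the
         * bootcode leaves in the mailbox to mark a valid address.
         */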
8637         if ((hi >> 16) == 0x484b) {
8638                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8639                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8640
8641                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8642                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8643                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8644                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8645                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8646         }
8647         /* Next, try NVRAM. */
8648         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8649                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8650                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8651                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8652                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8653                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8654                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8655                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8656                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8657         }
8658         /* Finally just fetch it out of the MAC control regs. */
8659         else {
8660                 hi = tr32(MAC_ADDR_0_HIGH);
8661                 lo = tr32(MAC_ADDR_0_LOW);
8662
8663                 dev->dev_addr[5] = lo & 0xff;
8664                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8665                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8666                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8667                 dev->dev_addr[1] = hi & 0xff;
8668                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8669         }
8670
8671         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8672 #ifdef CONFIG_SPARC64
8673                 if (!tg3_get_default_macaddr_sparc(tp))
8674                         return 0;
8675 #endif
8676                 return -EINVAL;
8677         }
8678         return 0;
8679 }
8680
8681 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8682 {
8683         struct tg3_internal_buffer_desc test_desc;
8684         u32 sram_dma_descs;
8685         int i, ret;
8686
8687         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8688
8689         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8690         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8691         tw32(RDMAC_STATUS, 0);
8692         tw32(WDMAC_STATUS, 0);
8693
8694         tw32(BUFMGR_MODE, 0);
8695         tw32(FTQ_RESET, 0);
8696
8697         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8698         test_desc.addr_lo = buf_dma & 0xffffffff;
8699         test_desc.nic_mbuf = 0x00002100;
8700         test_desc.len = size;
8701
8702         /*
8703          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
8704          * the *second* time the tg3 driver was getting loaded after an
8705          * initial scan.
8706          *
8707          * Broadcom tells me:
8708          *   ...the DMA engine is connected to the GRC block and a DMA
8709          *   reset may affect the GRC block in some unpredictable way...
8710          *   The behavior of resets to individual blocks has not been tested.
8711          *
8712          * Broadcom noted the GRC reset will also reset all sub-components.
8713          */
8714         if (to_device) {
8715                 test_desc.cqid_sqid = (13 << 8) | 2;
8716
8717                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8718                 udelay(40);
8719         } else {
8720                 test_desc.cqid_sqid = (16 << 8) | 7;
8721
8722                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8723                 udelay(40);
8724         }
8725         test_desc.flags = 0x00000005;
8726
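        /* Copy the test descriptor into the NIC's on-chip DMA descriptor
         * pool one 32-bit word at a time, going through the indirect
         * memory-window registers in PCI config space.
         */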
8727         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8728                 u32 val;
8729
8730                 val = *(((u32 *)&test_desc) + i);
8731                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8732                                        sram_dma_descs + (i * sizeof(u32)));
8733                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8734         }
8735         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8736
8737         if (to_device) {
8738                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8739         } else {
8740                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8741         }
8742
8743         ret = -ENODEV;
8744         for (i = 0; i < 40; i++) {
8745                 u32 val;
8746
8747                 if (to_device)
8748                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8749                 else
8750                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8751                 if ((val & 0xffff) == sram_dma_descs) {
8752                         ret = 0;
8753                         break;
8754                 }
8755
8756                 udelay(100);
8757         }
8758
8759         return ret;
8760 }
8761
8762 #define TEST_BUFFER_SIZE        0x400
8763
8764 static int __devinit tg3_test_dma(struct tg3 *tp)
8765 {
8766         dma_addr_t buf_dma;
8767         u32 *buf;
8768         int ret;
8769
8770         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8771         if (!buf) {
8772                 ret = -ENOMEM;
8773                 goto out_nofree;
8774         }
8775
8776         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8777                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8778
8779 #ifndef CONFIG_X86
8780         {
8781                 u8 byte;
8782                 int cacheline_size;
8783                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8784
8785                 if (byte == 0)
8786                         cacheline_size = 1024;
8787                 else
8788                         cacheline_size = (int) byte * 4;
8789
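                /* Pick a DMA write boundary from the host's reported
                 * cacheline size: PCI-X parts get a 384-byte boundary,
                 * PCI Express parts 128 bytes, and everything else falls
                 * through to the 256-byte settings.
                 */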
8790                 switch (cacheline_size) {
8791                 case 16:
8792                 case 32:
8793                 case 64:
8794                 case 128:
8795                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8796                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8797                                 tp->dma_rwctrl |=
8798                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8799                                 break;
8800                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8801                                 tp->dma_rwctrl &=
8802                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8803                                 tp->dma_rwctrl |=
8804                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8805                                 break;
8806                         }
8807                         /* fallthrough */
8808                 case 256:
8809                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8810                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8811                                 tp->dma_rwctrl |=
8812                                         DMA_RWCTRL_WRITE_BNDRY_256;
8813                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8814                                 tp->dma_rwctrl |=
8815                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8816                 }
8817         }
8818 #endif
8819
8820         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8821                 /* DMA read watermark not used on PCIE */
8822                 tp->dma_rwctrl |= 0x00180000;
8823         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8824                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8825                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8826                         tp->dma_rwctrl |= 0x003f0000;
8827                 else
8828                         tp->dma_rwctrl |= 0x003f000f;
8829         } else {
8830                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8831                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8832                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8833
8834                         if (ccval == 0x6 || ccval == 0x7)
8835                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8836
8837                         /* Set bit 23 to re-enable PCIX hw bug fix */
8838                         tp->dma_rwctrl |= 0x009f0000;
8839                 } else {
8840                         tp->dma_rwctrl |= 0x001b000f;
8841                 }
8842         }
8843
8844         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8845             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8846                 tp->dma_rwctrl &= 0xfffffff0;
8847
8848         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8849             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8850                 /* Remove this if it causes problems for some boards. */
8851                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8852
8853                 /* On 5700/5701 chips, we need to set this bit.
8854                  * Otherwise the chip will issue cacheline transactions
8855                  * to streamable DMA memory without all of the byte
8856                  * enables turned on.  This is an error on several
8857                  * RISC PCI controllers, in particular sparc64.
8858                  *
8859                  * On 5703/5704 chips, this bit has been reassigned
8860                  * a different meaning.  In particular, it is used
8861                  * on those chips to enable a PCI-X workaround.
8862                  */
8863                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8864         }
8865
8866         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8867
8868 #if 0
8869         /* Unneeded, already done by tg3_get_invariants.  */
8870         tg3_switch_clocks(tp);
8871 #endif
8872
8873         ret = 0;
8874         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8875             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8876                 goto out;
8877
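        /* DMA the test pattern out to the chip and back, then compare.
         * If the data comes back corrupted while write boundaries are
         * still disabled, fall back to a 16-byte write boundary and try
         * again; any further corruption is treated as a hard failure.
         */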
8878         while (1) {
8879                 u32 *p = buf, i;
8880
8881                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8882                         p[i] = i;
8883
8884                 /* Send the buffer to the chip. */
8885                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8886                 if (ret) {
8887                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
8888                         break;
8889                 }
8890
8891 #if 0
8892                 /* validate data reached card RAM correctly. */
8893                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8894                         u32 val;
8895                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8896                         if (le32_to_cpu(val) != p[i]) {
8897                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8898                                 /* ret = -ENODEV here? */
8899                         }
8900                         p[i] = 0;
8901                 }
8902 #endif
8903                 /* Now read it back. */
8904                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8905                 if (ret) {
8906                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8907
8908                         break;
8909                 }
8910
8911                 /* Verify it. */
8912                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8913                         if (p[i] == i)
8914                                 continue;
8915
8916                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8917                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8918                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8919                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8920                                 break;
8921                         } else {
8922                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8923                                 ret = -ENODEV;
8924                                 goto out;
8925                         }
8926                 }
8927
8928                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8929                         /* Success. */
8930                         ret = 0;
8931                         break;
8932                 }
8933         }
8934
8935 out:
8936         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8937 out_nofree:
8938         return ret;
8939 }
8940
8941 static void __devinit tg3_init_link_config(struct tg3 *tp)
8942 {
8943         tp->link_config.advertising =
8944                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8945                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8946                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8947                  ADVERTISED_Autoneg | ADVERTISED_MII);
8948         tp->link_config.speed = SPEED_INVALID;
8949         tp->link_config.duplex = DUPLEX_INVALID;
8950         tp->link_config.autoneg = AUTONEG_ENABLE;
8951         netif_carrier_off(tp->dev);
8952         tp->link_config.active_speed = SPEED_INVALID;
8953         tp->link_config.active_duplex = DUPLEX_INVALID;
8954         tp->link_config.phy_is_low_power = 0;
8955         tp->link_config.orig_speed = SPEED_INVALID;
8956         tp->link_config.orig_duplex = DUPLEX_INVALID;
8957         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8958 }
8959
8960 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8961 {
8962         tp->bufmgr_config.mbuf_read_dma_low_water =
8963                 DEFAULT_MB_RDMA_LOW_WATER;
8964         tp->bufmgr_config.mbuf_mac_rx_low_water =
8965                 DEFAULT_MB_MACRX_LOW_WATER;
8966         tp->bufmgr_config.mbuf_high_water =
8967                 DEFAULT_MB_HIGH_WATER;
8968
8969         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8970                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8971         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8972                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8973         tp->bufmgr_config.mbuf_high_water_jumbo =
8974                 DEFAULT_MB_HIGH_WATER_JUMBO;
8975
8976         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8977         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8978 }
8979
8980 static char * __devinit tg3_phy_string(struct tg3 *tp)
8981 {
8982         switch (tp->phy_id & PHY_ID_MASK) {
8983         case PHY_ID_BCM5400:    return "5400";
8984         case PHY_ID_BCM5401:    return "5401";
8985         case PHY_ID_BCM5411:    return "5411";
8986         case PHY_ID_BCM5701:    return "5701";
8987         case PHY_ID_BCM5703:    return "5703";
8988         case PHY_ID_BCM5704:    return "5704";
8989         case PHY_ID_BCM5705:    return "5705";
8990         case PHY_ID_BCM5750:    return "5750";
8991         case PHY_ID_BCM5752:    return "5752";
8992         case PHY_ID_BCM8002:    return "8002/serdes";
8993         case 0:                 return "serdes";
8994         default:                return "unknown";
8995         }
8996 }
8997
8998 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8999 {
9000         struct pci_dev *peer;
9001         unsigned int func, devnr = tp->pdev->devfn & ~7;
9002
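        /* The 5704 is a dual-port part; its sibling shares the same PCI
         * slot, so probe the other function numbers of this devfn until
         * we find it.
         */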
9003         for (func = 0; func < 8; func++) {
9004                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
9005                 if (peer && peer != tp->pdev)
9006                         break;
9007                 pci_dev_put(peer);
9008         }
9009         if (!peer || peer == tp->pdev)
9010                 BUG();
9011
9012         /*
9013          * We don't need to keep the refcount elevated; there's no way
9014          * to remove one half of this device without removing the other
9015          */
9016         pci_dev_put(peer);
9017
9018         return peer;
9019 }
9020
9021 static int __devinit tg3_init_one(struct pci_dev *pdev,
9022                                   const struct pci_device_id *ent)
9023 {
9024         static int tg3_version_printed = 0;
9025         unsigned long tg3reg_base, tg3reg_len;
9026         struct net_device *dev;
9027         struct tg3 *tp;
9028         int i, err, pci_using_dac, pm_cap;
9029
9030         if (tg3_version_printed++ == 0)
9031                 printk(KERN_INFO "%s", version);
9032
9033         err = pci_enable_device(pdev);
9034         if (err) {
9035                 printk(KERN_ERR PFX "Cannot enable PCI device, "
9036                        "aborting.\n");
9037                 return err;
9038         }
9039
9040         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9041                 printk(KERN_ERR PFX "Cannot find proper PCI device "
9042                        "base address, aborting.\n");
9043                 err = -ENODEV;
9044                 goto err_out_disable_pdev;
9045         }
9046
9047         err = pci_request_regions(pdev, DRV_MODULE_NAME);
9048         if (err) {
9049                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
9050                        "aborting.\n");
9051                 goto err_out_disable_pdev;
9052         }
9053
9054         pci_set_master(pdev);
9055
9056         /* Find power-management capability. */
9057         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9058         if (pm_cap == 0) {
9059                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
9060                        "aborting.\n");
9061                 err = -EIO;
9062                 goto err_out_free_res;
9063         }
9064
9065         /* Configure DMA attributes. */
9066         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
9067         if (!err) {
9068                 pci_using_dac = 1;
9069                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
9070                 if (err < 0) {
9071                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
9072                                "for consistent allocations\n");
9073                         goto err_out_free_res;
9074                 }
9075         } else {
9076                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
9077                 if (err) {
9078                         printk(KERN_ERR PFX "No usable DMA configuration, "
9079                                "aborting.\n");
9080                         goto err_out_free_res;
9081                 }
9082                 pci_using_dac = 0;
9083         }
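        /* At this point pci_using_dac records whether the device got a
         * full 64-bit DMA mask (and may advertise NETIF_F_HIGHDMA below)
         * or had to fall back to 32-bit addressing.
         */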
9084
9085         tg3reg_base = pci_resource_start(pdev, 0);
9086         tg3reg_len = pci_resource_len(pdev, 0);
9087
9088         dev = alloc_etherdev(sizeof(*tp));
9089         if (!dev) {
9090                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
9091                 err = -ENOMEM;
9092                 goto err_out_free_res;
9093         }
9094
9095         SET_MODULE_OWNER(dev);
9096         SET_NETDEV_DEV(dev, &pdev->dev);
9097
9098         if (pci_using_dac)
9099                 dev->features |= NETIF_F_HIGHDMA;
9100         dev->features |= NETIF_F_LLTX;
9101 #if TG3_VLAN_TAG_USED
9102         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9103         dev->vlan_rx_register = tg3_vlan_rx_register;
9104         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
9105 #endif
9106
9107         tp = netdev_priv(dev);
9108         tp->pdev = pdev;
9109         tp->dev = dev;
9110         tp->pm_cap = pm_cap;
9111         tp->mac_mode = TG3_DEF_MAC_MODE;
9112         tp->rx_mode = TG3_DEF_RX_MODE;
9113         tp->tx_mode = TG3_DEF_TX_MODE;
9114         tp->mi_mode = MAC_MI_MODE_BASE;
9115         if (tg3_debug > 0)
9116                 tp->msg_enable = tg3_debug;
9117         else
9118                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
9119
9120         /* The word/byte swap controls here control register access byte
9121          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
9122          * setting below.
9123          */
9124         tp->misc_host_ctrl =
9125                 MISC_HOST_CTRL_MASK_PCI_INT |
9126                 MISC_HOST_CTRL_WORD_SWAP |
9127                 MISC_HOST_CTRL_INDIR_ACCESS |
9128                 MISC_HOST_CTRL_PCISTATE_RW;
9129
9130         /* The NONFRM (non-frame) byte/word swap controls take effect
9131          * on descriptor entries, i.e. anything that isn't packet data.
9132          *
9133          * The StrongARM chips on the board (one for tx, one for rx)
9134          * are running in big-endian mode.
9135          */
9136         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
9137                         GRC_MODE_WSWAP_NONFRM_DATA);
9138 #ifdef __BIG_ENDIAN
9139         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
9140 #endif
9141         spin_lock_init(&tp->lock);
9142         spin_lock_init(&tp->tx_lock);
9143         spin_lock_init(&tp->indirect_lock);
9144         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
9145
9146         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
9147         if (tp->regs == 0UL) {
9148                 printk(KERN_ERR PFX "Cannot map device registers, "
9149                        "aborting.\n");
9150                 err = -ENOMEM;
9151                 goto err_out_free_dev;
9152         }
9153
9154         tg3_init_link_config(tp);
9155
9156         tg3_init_bufmgr_config(tp);
9157
9158         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
9159         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
9160         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
9161
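             /* Wire up the net_device entry points.  NAPI receive polling goes
              * through tg3_poll with a per-poll budget ("weight") of 64 packets.
              */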
9162         dev->open = tg3_open;
9163         dev->stop = tg3_close;
9164         dev->get_stats = tg3_get_stats;
9165         dev->set_multicast_list = tg3_set_rx_mode;
9166         dev->set_mac_address = tg3_set_mac_addr;
9167         dev->do_ioctl = tg3_ioctl;
9168         dev->tx_timeout = tg3_tx_timeout;
9169         dev->poll = tg3_poll;
9170         dev->ethtool_ops = &tg3_ethtool_ops;
9171         dev->weight = 64;
9172         dev->watchdog_timeo = TG3_TX_TIMEOUT;
9173         dev->change_mtu = tg3_change_mtu;
9174         dev->irq = pdev->irq;
9175 #ifdef CONFIG_NET_POLL_CONTROLLER
9176         dev->poll_controller = tg3_poll_controller;
9177 #endif
9178
9179         err = tg3_get_invariants(tp);
9180         if (err) {
9181                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
9182                        "aborting.\n");
9183                 goto err_out_iounmap;
9184         }
9185
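             /* 5705-class chips get smaller buffer-manager watermarks, presumably
              * to match the reduced on-chip MBUF memory on those parts.
              */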
9186         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9187                 tp->bufmgr_config.mbuf_read_dma_low_water =
9188                         DEFAULT_MB_RDMA_LOW_WATER_5705;
9189                 tp->bufmgr_config.mbuf_mac_rx_low_water =
9190                         DEFAULT_MB_MACRX_LOW_WATER_5705;
9191                 tp->bufmgr_config.mbuf_high_water =
9192                         DEFAULT_MB_HIGH_WATER_5705;
9193         }
9194
9195 #if TG3_TSO_SUPPORT != 0
9196         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
9197                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9198         }
9199         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9200             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9201             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
9202             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
9203                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
9204         } else {
9205                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9206         }
9207
9208         /* TSO is off by default; the user can enable it using ethtool.  */
9209 #if 0
9210         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
9211                 dev->features |= NETIF_F_TSO;
9212 #endif
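             /* Usage sketch, assuming a userspace ethtool with offload support:
              * "ethtool -K ethX tso on" (ethX being whatever name this device gets)
              * toggles NETIF_F_TSO at runtime through the driver's ethtool_ops.
              */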
9213
9214 #endif
9215
9216         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
9217             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
9218             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
9219                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
9220                 tp->rx_pending = 63;
9221         }
9222
9223         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9224                 tp->pdev_peer = tg3_find_5704_peer(tp);
9225
9226         err = tg3_get_device_address(tp);
9227         if (err) {
9228                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
9229                        "aborting.\n");
9230                 goto err_out_iounmap;
9231         }
9232
9233         /*
9234          * Reset the chip in case an UNDI or EFI driver did not shut it down.
9235          * The DMA self test will enable the WDMAC and we'll see (spurious)
9236          * pending DMA on the PCI bus at that point.
9237          */
9238         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
9239             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9240                 pci_save_state(tp->pdev);
9241                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
9242                 tg3_halt(tp);
9243         }
9244
9245         err = tg3_test_dma(tp);
9246         if (err) {
9247                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
9248                 goto err_out_iounmap;
9249         }
9250
9251         /* Tigon3 can only checksum-offload IPv4... and some chips have buggy
9252          * checksumming.
9253          */
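             /* NETIF_F_IP_CSUM (as opposed to NETIF_F_HW_CSUM) advertises transmit
              * checksum offload for IPv4 only; other protocols fall back to
              * software checksumming in the stack.
              */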
9254         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
9255                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
9256                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9257         } else
9258                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9259
9260         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
9261                 dev->features &= ~NETIF_F_HIGHDMA;
9262
9263         /* flow control autonegotiation is default behavior */
9264         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9265
9266         err = register_netdev(dev);
9267         if (err) {
9268                 printk(KERN_ERR PFX "Cannot register net device, "
9269                        "aborting.\n");
9270                 goto err_out_iounmap;
9271         }
9272
9273         pci_set_drvdata(pdev, dev);
9274
9275         /* Now that we have fully setup the chip, save away a snapshot
9276          * of the PCI config space.  We need to restore this after
9277          * GRC_MISC_CFG core clock resets and some resume events.
9278          */
9279         pci_save_state(tp->pdev);
9280
9281         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
9282                dev->name,
9283                tp->board_part_number,
9284                tp->pci_chip_rev_id,
9285                tg3_phy_string(tp),
9286                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
9287                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
9288                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
9289                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
9290                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
9291                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
9292
9293         for (i = 0; i < 6; i++)
9294                 printk("%2.2x%c", dev->dev_addr[i],
9295                        i == 5 ? '\n' : ':');
9296
9297         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
9298                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
9299                "TSOcap[%d]\n",
9300                dev->name,
9301                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
9302                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
9303                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
9304                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
9305                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
9306                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
9307                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
9308
9309         return 0;
9310
9311 err_out_iounmap:
9312         iounmap(tp->regs);
9313
9314 err_out_free_dev:
9315         free_netdev(dev);
9316
9317 err_out_free_res:
9318         pci_release_regions(pdev);
9319
9320 err_out_disable_pdev:
9321         pci_disable_device(pdev);
9322         pci_set_drvdata(pdev, NULL);
9323         return err;
9324 }
9325
9326 static void __devexit tg3_remove_one(struct pci_dev *pdev)
9327 {
9328         struct net_device *dev = pci_get_drvdata(pdev);
9329
9330         if (dev) {
9331                 struct tg3 *tp = netdev_priv(dev);
9332
9333                 unregister_netdev(dev);
9334                 iounmap(tp->regs);
9335                 free_netdev(dev);
9336                 pci_release_regions(pdev);
9337                 pci_disable_device(pdev);
9338                 pci_set_drvdata(pdev, NULL);
9339         }
9340 }
9341
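     /* Power-management hooks.  Suspend stops the NAPI/tx paths, kills the
      * driver timer, disables interrupts, halts the chip and drops it into the
      * requested low-power state; resume restores PCI config space, powers the
      * chip back up and re-runs the normal hardware init path.
      */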
9342 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9343 {
9344         struct net_device *dev = pci_get_drvdata(pdev);
9345         struct tg3 *tp = netdev_priv(dev);
9346         int err;
9347
9348         if (!netif_running(dev))
9349                 return 0;
9350
9351         tg3_netif_stop(tp);
9352
9353         del_timer_sync(&tp->timer);
9354
9355         spin_lock_irq(&tp->lock);
9356         spin_lock(&tp->tx_lock);
9357         tg3_disable_ints(tp);
9358         spin_unlock(&tp->tx_lock);
9359         spin_unlock_irq(&tp->lock);
9360
9361         netif_device_detach(dev);
9362
9363         spin_lock_irq(&tp->lock);
9364         spin_lock(&tp->tx_lock);
9365         tg3_halt(tp);
9366         spin_unlock(&tp->tx_lock);
9367         spin_unlock_irq(&tp->lock);
9368
9369         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
9370         if (err) {
9371                 spin_lock_irq(&tp->lock);
9372                 spin_lock(&tp->tx_lock);
9373
9374                 tg3_init_hw(tp);
9375
9376                 tp->timer.expires = jiffies + tp->timer_offset;
9377                 add_timer(&tp->timer);
9378
9379                 netif_device_attach(dev);
9380                 tg3_netif_start(tp);
9381
9382                 spin_unlock(&tp->tx_lock);
9383                 spin_unlock_irq(&tp->lock);
9384         }
9385
9386         return err;
9387 }
9388
9389 static int tg3_resume(struct pci_dev *pdev)
9390 {
9391         struct net_device *dev = pci_get_drvdata(pdev);
9392         struct tg3 *tp = netdev_priv(dev);
9393         int err;
9394
9395         if (!netif_running(dev))
9396                 return 0;
9397
9398         pci_restore_state(tp->pdev);
9399
9400         err = tg3_set_power_state(tp, 0);
9401         if (err)
9402                 return err;
9403
9404         netif_device_attach(dev);
9405
9406         spin_lock_irq(&tp->lock);
9407         spin_lock(&tp->tx_lock);
9408
9409         tg3_init_hw(tp);
9410
9411         tp->timer.expires = jiffies + tp->timer_offset;
9412         add_timer(&tp->timer);
9413
9414         tg3_enable_ints(tp);
9415
9416         tg3_netif_start(tp);
9417
9418         spin_unlock(&tp->tx_lock);
9419         spin_unlock_irq(&tp->lock);
9420
9421         return 0;
9422 }
9423
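     /* PCI glue: tg3_pci_tbl (defined earlier in this file) lists the device IDs
      * this driver binds to; probe/remove plus the suspend/resume hooks above
      * handle the rest.
      */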
9424 static struct pci_driver tg3_driver = {
9425         .name           = DRV_MODULE_NAME,
9426         .id_table       = tg3_pci_tbl,
9427         .probe          = tg3_init_one,
9428         .remove         = __devexit_p(tg3_remove_one),
9429         .suspend        = tg3_suspend,
9430         .resume         = tg3_resume
9431 };
9432
9433 static int __init tg3_init(void)
9434 {
9435         return pci_module_init(&tg3_driver);
9436 }
9437
9438 static void __exit tg3_cleanup(void)
9439 {
9440         pci_unregister_driver(&tg3_driver);
9441 }
9442
9443 module_init(tg3_init);
9444 module_exit(tg3_cleanup);